diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 08b13a0543f..62541218442 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -65,16 +65,7 @@ updates:
       interval: weekly
       day: sunday
   - package-ecosystem: gomod
-    directory: /example/fib
-    labels:
-      - dependencies
-      - go
-      - Skip Changelog
-    schedule:
-      interval: weekly
-      day: sunday
-  - package-ecosystem: gomod
-    directory: /example/jaeger
+    directory: /example/dice
     labels:
       - dependencies
       - go
@@ -127,15 +118,6 @@ updates:
     schedule:
       interval: weekly
       day: sunday
-  - package-ecosystem: gomod
-    directory: /example/view
-    labels:
-      - dependencies
-      - go
-      - Skip Changelog
-    schedule:
-      interval: weekly
-      day: sunday
   - package-ecosystem: gomod
     directory: /example/zipkin
     labels:
@@ -145,33 +127,6 @@ updates:
     schedule:
       interval: weekly
       day: sunday
-  - package-ecosystem: gomod
-    directory: /exporters/jaeger
-    labels:
-      - dependencies
-      - go
-      - Skip Changelog
-    schedule:
-      interval: weekly
-      day: sunday
-  - package-ecosystem: gomod
-    directory: /exporters/otlp/internal/retry
-    labels:
-      - dependencies
-      - go
-      - Skip Changelog
-    schedule:
-      interval: weekly
-      day: sunday
-  - package-ecosystem: gomod
-    directory: /exporters/otlp/otlpmetric
-    labels:
-      - dependencies
-      - go
-      - Skip Changelog
-    schedule:
-      interval: weekly
-      day: sunday
   - package-ecosystem: gomod
     directory: /exporters/otlp/otlpmetric/otlpmetricgrpc
     labels:
diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
index 18c29605c96..5eab8d039ba 100644
--- a/.github/workflows/benchmark.yml
+++ b/.github/workflows/benchmark.yml
@@ -1,21 +1,26 @@
 name: Benchmark
 on:
   push:
-    branches:
-      - main
+    tags:
+      - v1.*
+  workflow_dispatch:
+
 env:
-  DEFAULT_GO_VERSION: "1.20"
+  DEFAULT_GO_VERSION: "~1.21.3"
 jobs:
   benchmark:
     name: Benchmarks
     runs-on: ubuntu-latest
+    timeout-minutes: 15
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-go@v4
         with:
           go-version: ${{ env.DEFAULT_GO_VERSION }}
+          check-latest: true
+          cache-dependency-path: "**/go.sum"
       - name: Run benchmarks
-        run: make test-bench | tee output.txt
+        run: make benchmark | tee output.txt
       - name: Download previous benchmark data
         uses: actions/cache@v3
         with:
@@ -28,6 +33,8 @@ jobs:
           tool: 'go'
           output-file-path: output.txt
           external-data-json-path: ./benchmarks/data.json
-          auto-push: false
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          gh-pages-branch: benchmarks
+          auto-push: true
           fail-on-alert: false
           alert-threshold: "400%"
diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml
index a1bae7d71ce..2cd1e4d2676 100644
--- a/.github/workflows/changelog.yml
+++ b/.github/workflows/changelog.yml
@@ -16,7 +16,7 @@ jobs:
     if: ${{ !contains(github.event.pull_request.labels.*.name, 'dependencies') && !contains(github.event.pull_request.labels.*.name, 'Skip Changelog') && !contains(github.event.pull_request.title, '[chore]')}}
 
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
 
       - name: Check for CHANGELOG changes
         run: |
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index cc6290a5e72..eb8c715f642 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -14,28 +14,25 @@ env:
   # backwards compatibility with the previous two minor releases and we
   # explicitly test our code for these versions so keeping this at prior
   # versions does not add value.
-  DEFAULT_GO_VERSION: "1.20"
+  DEFAULT_GO_VERSION: "~1.21.3"
 jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repo
-        uses: actions/checkout@v3
-      - name: Setup Environment
-        run: |
-          echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV
-          echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
+        uses: actions/checkout@v4
       - name: Install Go
         uses: actions/setup-go@v4
         with:
           go-version: ${{ env.DEFAULT_GO_VERSION }}
+          check-latest: true
           cache-dependency-path: "**/go.sum"
       - name: Tools cache
         uses: actions/cache@v3
         env:
           cache-name: go-tools-cache
         with:
-          path: ~/.tools
+          path: .tools
           key: ${{ runner.os }}-${{ env.cache-name }}-${{ hashFiles('./internal/tools/**') }}
       - name: Generate
         run: make generate
@@ -45,12 +42,11 @@ jobs:
         run: make build
       - name: Check clean repository
         run: make check-clean-work-tree
-
-  test-race:
+  test-bench:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repo
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Setup Environment
         run: |
           echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV
@@ -60,6 +56,20 @@ jobs:
         with:
           go-version: ${{ env.DEFAULT_GO_VERSION }}
           cache-dependency-path: "**/go.sum"
+      - name: Run benchmarks to check functionality
+        run: make test-bench
+
+  test-race:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout Repo
+        uses: actions/checkout@v4
+      - name: Install Go
+        uses: actions/setup-go@v4
+        with:
+          go-version: ${{ env.DEFAULT_GO_VERSION }}
+          check-latest: true
+          cache-dependency-path: "**/go.sum"
       - name: Run tests with race detector
         run: make test-race
 
@@ -67,15 +77,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repo
-        uses: actions/checkout@v3
-      - name: Setup Environment
-        run: |
-          echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV
-          echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
+        uses: actions/checkout@v4
       - name: Install Go
         uses: actions/setup-go@v4
         with:
           go-version: ${{ env.DEFAULT_GO_VERSION }}
+          check-latest: true
           cache-dependency-path: "**/go.sum"
       - name: Run coverage tests
         run: |
@@ -99,7 +106,7 @@ jobs:
   compatibility-test:
     strategy:
       matrix:
-        go-version: ["1.20", 1.19]
+        go-version: ["~1.21.3", "~1.20.10"]
         os: [ubuntu-latest, macos-latest, windows-latest]
         # GitHub Actions does not support arm* architectures on default
         # runners. It is possible to accomplish this with a self-hosted runner
@@ -113,16 +120,12 @@ jobs:
     runs-on: ${{ matrix.os }}
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
-      - name: Setup Environment
-        run: |
-          echo "GOPATH=$(go env GOPATH)" >> $GITHUB_ENV
-          echo "$(go env GOPATH)/bin" >> $GITHUB_PATH
-        shell: bash
+        uses: actions/checkout@v4
       - name: Install Go
         uses: actions/setup-go@v4
         with:
           go-version: ${{ matrix.go-version }}
+          check-latest: true
           cache-dependency-path: "**/go.sum"
       - name: Run tests
         env:
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 72ab609eb45..67ad856d2cf 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -13,6 +13,9 @@ on:
     #        │  │ │ │ │
     #        *  * * * *
     - cron: '30 1 * * *'
+  push:
+    branches: [ main ]
+  pull_request:
 
 jobs:
   CodeQL-Build:
@@ -20,7 +23,7 @@ jobs:
 
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
 
       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL
diff --git a/.github/workflows/codespell.yaml b/.github/workflows/codespell.yaml
index 83b68e1fd41..774c020cb68 100644
--- a/.github/workflows/codespell.yaml
+++ b/.github/workflows/codespell.yaml
@@ -9,6 +9,6 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout Repo
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Codespell
         run: make codespell
diff --git a/.github/workflows/create-dependabot-pr.yml b/.github/workflows/create-dependabot-pr.yml
index 6506a59f225..3d47df56ab1 100644
--- a/.github/workflows/create-dependabot-pr.yml
+++ b/.github/workflows/create-dependabot-pr.yml
@@ -10,9 +10,11 @@ jobs:
       - name: Install Go
         uses: actions/setup-go@v4
         with:
-          go-version: 1.19
+          go-version: "~1.21.3"
+          check-latest: true
+          cache-dependency-path: "**/go.sum"
 
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
 
       - name: Install zsh
         run: sudo apt-get update; sudo apt-get install zsh
diff --git a/.github/workflows/dependabot.yml b/.github/workflows/dependabot.yml
index 2de6b06d401..c74d60f1638 100644
--- a/.github/workflows/dependabot.yml
+++ b/.github/workflows/dependabot.yml
@@ -8,18 +8,20 @@ jobs:
     if: ${{ contains(github.event.pull_request.labels.*.name, 'dependencies') }}
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v3
+    - uses: actions/checkout@v4
       with:
         ref: ${{ github.head_ref }}
     - uses: actions/setup-go@v4
       with:
-        go-version: '^1.20.0'
+        go-version: "~1.21.3"
+        check-latest: true
+        cache-dependency-path: "**/go.sum"
     - uses: evantorrie/mott-the-tidier@v1-beta
       id: modtidy
       with:
         gomods: '**/go.mod'
         gomodsum_only: true
-    - uses: stefanzweifel/git-auto-commit-action@v4
+    - uses: stefanzweifel/git-auto-commit-action@v5
       id: autocommit
       with:
         commit_message: Auto-fix go.sum changes in dependent modules
diff --git a/.github/workflows/gosec.yml b/.github/workflows/gosec.yml
deleted file mode 100644
index c0c1c621b16..00000000000
--- a/.github/workflows/gosec.yml
+++ /dev/null
@@ -1,27 +0,0 @@
-name: Run Gosec
-on:
-  workflow_dispatch:
-  schedule:
-    #        ┌───────────── minute (0 - 59)
-    #        │  ┌───────────── hour (0 - 23)
-    #        │  │ ┌───────────── day of the month (1 - 31)
-    #        │  │ │ ┌───────────── month (1 - 12 or JAN-DEC)
-    #        │  │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT)
-    #        │  │ │ │ │
-    #        │  │ │ │ │
-    #        │  │ │ │ │
-    #        *  * * * *
-    - cron: '30 2 * * *'
-jobs:
-  tests:
-    runs-on: ubuntu-latest
-    env:
-      GO111MODULE: on
-    steps:
-      - name: Checkout Source
-        uses: actions/checkout@v3
-      - name: Run Gosec Security Scanner
-        uses: securego/gosec@master
-        with:
-          args: ./...
-
diff --git a/.github/workflows/links-fail-fast.yml b/.github/workflows/links-fail-fast.yml
index 70a2ab0cc2d..cfa1473d162 100644
--- a/.github/workflows/links-fail-fast.yml
+++ b/.github/workflows/links-fail-fast.yml
@@ -8,7 +8,7 @@ jobs:
   check-links:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
 
       - name: Link Checker
         uses: lycheeverse/lychee-action@v1.8.0
diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml
index 50184ce597a..423438dbeb5 100644
--- a/.github/workflows/links.yml
+++ b/.github/workflows/links.yml
@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - name: Checkout Repo
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4
 
     - name: Link Checker
       id: lychee
diff --git a/.github/workflows/markdown-fail-fast.yml b/.github/workflows/markdown-fail-fast.yml
index 1e4a5cd5342..781be0253e8 100644
--- a/.github/workflows/markdown-fail-fast.yml
+++ b/.github/workflows/markdown-fail-fast.yml
@@ -12,7 +12,7 @@ jobs:
       md: ${{ steps.changes.outputs.md }}
     steps:
       - name: Checkout Repo
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           fetch-depth: 0
       - name: Get changed files
@@ -27,7 +27,7 @@ jobs:
     if: ${{needs.changedfiles.outputs.md}}
     steps:
     - name: Checkout Repo
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4
     - name: Run linter
       uses: docker://avtodev/markdown-lint:v1
       with:
diff --git a/.github/workflows/markdown.yml b/.github/workflows/markdown.yml
index 02725739c75..b82b9002df5 100644
--- a/.github/workflows/markdown.yml
+++ b/.github/workflows/markdown.yml
@@ -12,7 +12,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - name: Checkout Repo
-      uses: actions/checkout@v3
+      uses: actions/checkout@v4
 
     - name: Run linter
       id: markdownlint
diff --git a/.gitignore b/.gitignore
index aa699376225..895c7664beb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,12 +13,10 @@ go.work.sum
 
 gen/
 
-/example/fib/fib
-/example/fib/traces.txt
-/example/jaeger/jaeger
+/example/dice/dice
 /example/namedtracer/namedtracer
+/example/otel-collector/otel-collector
 /example/opencensus/opencensus
 /example/passthrough/passthrough
 /example/prometheus/prometheus
 /example/zipkin/zipkin
-/example/otel-collector/otel-collector
diff --git a/.golangci.yml b/.golangci.yml
index 61782fbf0dd..a62511f382e 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -12,8 +12,9 @@ linters:
     - depguard
     - errcheck
     - godot
-    - gofmt
+    - gofumpt
     - goimports
+    - gosec
     - gosimple
     - govet
     - ineffassign
@@ -53,6 +54,20 @@ issues:
       text: "calls to (.+) only in main[(][)] or init[(][)] functions"
       linters:
         - revive
+    # It's okay to not run gosec in a test.
+    - path: _test\.go
+      linters:
+        - gosec
+    # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
+    # as we commonly use it in tests and examples.
+    - text: "G404:"
+      linters:
+        - gosec
+    # Ignoring gosec G402: TLS MinVersion too low
+    # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
+    - text: "G402: TLS MinVersion too low."
+      linters:
+        - gosec
   include:
     # revive exported should have comment or be unexported.
     - EXC0012
@@ -76,11 +91,6 @@ linters-settings:
       otlp-internal:
         files:
           - "!**/exporters/otlp/internal/**/*.go"
-          # TODO: remove the following when otlpmetric/internal is removed.
-          - "!**/exporters/otlp/otlpmetric/internal/oconf/envconfig.go"
-          - "!**/exporters/otlp/otlpmetric/internal/oconf/options.go"
-          - "!**/exporters/otlp/otlpmetric/internal/oconf/options_test.go"
-          - "!**/exporters/otlp/otlpmetric/internal/otest/client_test.go"
         deny:
           - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal"
             desc: Do not use cross-module internal packages.
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 774ec6edb82..a0abcf5fb1d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,47 +10,205 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
 
 ### Added
 
-- Add `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
-- Add `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- The `go.opentelemetry.io/otel/semconv/v1.22.0` package.
+  The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735)
+- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733)
+
+### Changed
+
+- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722)
+- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721)
+
+## [1.21.0/0.44.0] 2023-11-16
+
+### Removed
+
+- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706)
+- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707)
+- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708)
+- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723)
+
+### Fixed
+
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719)
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719)
+
+## [1.20.0/0.43.0] 2023-11-10
+
+This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementors need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
+
+### Added
+
+- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567)
+- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584)
+- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620)
+- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620)
+- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644)
+- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments. (#4603)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660)
+- Add Summary, SummaryDataPoint, and QuantileValue to `go.opentelemetry.io/sdk/metric/metricdata`. (#4622)
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585)
+- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605)
+- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668)
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
+- Deprecate `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
+- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
+  Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
+- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
+
+### Changed
+
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the metric.Producer interface. (#4583)
+- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
+  This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
+  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
+  This extends the `Tracer` interface and is a breaking change for any existing implementation.
+  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
+  This extends the `Span` interface and is a breaking change for any existing implementation.
+  Implementors need to update their implementations based on what they want the default behavior of the interface to be.
+  See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` does no longer depend on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679)
+
+### Fixed
+
+- Fix improper parsing of characters such as `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as a whitespace. (#4667)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as a whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as a whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as a whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracegrpc` as they were rendered as a whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp` as they were rendered as a whitespace. (#4699)
+- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695)
+
+## [1.19.0/0.42.0/0.0.7] 2023-09-28
+
+This release contains the first stable release of the OpenTelemetry Go [metric SDK].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539)
+- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507)
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507)
+- Upgrade `gopkg.io/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+### Removed
+
+- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566)
+
+## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14
+
+This is a release candidate for the v1.19.0/v0.42.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+## [1.18.0/0.41.0/0.0.6] 2023-09-12
+
+This release drops the compatibility guarantee of [Go 1.19].
+
+### Added
+
+- Add `WithProducer` option in `go.opentelemetry.io/otel/exporters/prometheus` to restore the ability to register producers on the prometheus exporter's manual reader. (#4473)
+- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447)
+
+### Changed
+
+- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541).
+  The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470)
+
+### Removed
+
+- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468)
+- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469)
+- Dropped guaranteed support for versions of Go less than 1.20. (#4481)
+
+## [1.17.0/0.40.0/0.0.5] 2023-08-28
+
+### Added
+
+- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
 - Add support for exponential histogram aggregations.
-  A histogram can be configured as an exponential histogram using a view with `go.opentelemetry.io/otel/sdk/metric/aggregation.ExponentialHistogram` as the aggregation. (#4245)
-- Add `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272)
-- Add `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272)
-- OTLP Metrics Exporter now supports the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287)
+  A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287)
 - Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306)
-- Add info and debug logging to the metric SDK. (#4315)
+- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315)
 - The `go.opentelemetry.io/otel/semconv/v1.21.0` package.
   The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362)
 - Accept 201 to 299 HTTP status as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365)
 - Document the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter"` need to be concurrent safe. (#4381)
-- Expand the set of units supported by the prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.op/otel/exporters/prometheus` (#4374)
+- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.io/otel/exporters/prometheus` (#4374)
 - Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435)
-- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437)
+- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444)
+- Support Go 1.21. (#4463)
 
 ### Changed
 
 - Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145)
 - Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202)
-- Return an error on the creation of new instruments if their name doesn't pass regexp validation. (#4210)
+- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210)
 - `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244)
 - `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244)
-- Count the Collect time in the PeriodicReader timeout. (#4221)
-- `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
-- `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
-- ⚠️ Metrics SDK Breaking ⚠️ : the `AttributeFilter` fields of the `Stream` from `go.opentelemetry.io/otel/sdk/metric` is replaced by the `AttributeKeys` field.
-  The `AttributeKeys` fields allows users to specify an allow-list of attributes allowed to be recorded for a view.
-  This change is made to ensure compatibility with the OpenTelemetry specification. (#4288)
-- If an attribute set is omitted from an async callback, the previous value will no longer be exported. (#4290)
-- If an attribute set is Observed multiple times in an async callback, the values will be summed instead of the last observation winning. (#4289)
+- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290)
+- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289)
 - Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332)
 - Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. (#4333)
 - `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user provided context does not contain a deadline. (#4356, #4377)
 - Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408)
-- Increase instrument name maximum length from 63 to 255 characters. (#4434)
-- Add `go.opentelemetry.op/otel/sdk/metric.WithProducer` as an Option for metric.NewManualReader and metric.NewPeriodicReader, and remove `Reader.RegisterProducer()` (#4346)
+- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434)
+- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346)
 
 ### Removed
 
+- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/metric`.
+  Use the added `WithProducer` option instead. (#4346)
 - Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/metric`.
   Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
 
@@ -58,21 +216,21 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
 
 - Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
 - Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
-- Fix `resource.WithHostID()` to not set an empty `host.id`. (#4317)
+- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
 - Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
 - Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
-- The `ManualReader` will not panic if `AggregationSelector` returns `nil`. (#4350)
-- If a Reader's AggregationSelector return nil or DefaultAggregation the pipeline will use the default aggregation. (#4350)
+- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
+- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350)
 - Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
 - Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
-- Improve context cancelation handling in batch span processor's `ForceFlush` in  `go.opentelemetry.io/otel/sdk/trace`. (#4369)
+- Improve context cancellation handling in batch span processor's `ForceFlush` in  `go.opentelemetry.io/otel/sdk/trace`. (#4369)
 - Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
 - Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
 - Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
 - Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
 - Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
 - Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
-- Do not append _total if the counter already ends in total `go.opentelemetry.io/otel/exporter/prometheus`. (#4373)
+- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporter/prometheus`. (#4373)
 - Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409)
 - Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
 
@@ -2588,7 +2746,13 @@ It contains api and sdk for trace and meter.
 - CircleCI build CI manifest files.
 - CODEOWNERS file to track owners of this project.
 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.16.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...HEAD
+[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
+[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
+[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
+[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1
+[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0
+[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0
 [1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0
 [1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1
 [1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1
@@ -2661,3 +2825,5 @@ It contains api and sdk for trace and meter.
 [Go 1.18]: https://go.dev/doc/go1.18
 
 [metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric
+[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric
+[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a00dbca7b08..850606ae692 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -90,6 +90,10 @@ git push <YOUR_FORK> <YOUR_BRANCH_NAME>
 Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
 request ID to the entry you added to `CHANGELOG.md`.
 
+Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request.
+Rewriting Git history makes it difficult to keep track of iterations during code review.
+All pull requests are squashed to a single commit upon merge to `main`.
+
 ### How to Receive Comments
 
 * If the PR is not ready for review, please put `[WIP]` in the title,
diff --git a/Makefile b/Makefile
index c996d227bea..35fc189961b 100644
--- a/Makefile
+++ b/Makefile
@@ -77,6 +77,9 @@ $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
 GORELEASE = $(TOOLS)/gorelease
 $(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease
 
+GOVULNCHECK = $(TOOLS)/govulncheck
+$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
+
 .PHONY: tools
 tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
 
@@ -189,6 +192,18 @@ test-coverage: | $(GOCOVMERGE)
 	done; \
 	$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
 
+# Adding a directory will include all benchmarks in that directory if a filter is not specified.
+BENCHMARK_TARGETS := sdk/trace
+.PHONY: benchmark
+benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
+BENCHMARK_FILTER = .
+# You can override the filter for a particular directory by adding a rule here.
+benchmark/sdk/trace: BENCHMARK_FILTER = SpanWithAttributes_8/AlwaysSample
+benchmark/%:
+	@echo "$(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(BENCHMARK_FILTER) $*..." \
+		&& cd $* \
+		$(foreach filter, $(BENCHMARK_FILTER), && $(GO) test -timeout $(TIMEOUT)s -run=xxxxxMatchNothingxxxxx -bench=$(filter))
+
 .PHONY: golangci-lint golangci-lint-fix
 golangci-lint-fix: ARGS=--fix
 golangci-lint-fix: golangci-lint
@@ -210,13 +225,13 @@ go-mod-tidy/%: DIR=$*
 go-mod-tidy/%: | crosslink
 	@echo "$(GO) mod tidy in $(DIR)" \
 		&& cd $(DIR) \
-		&& $(GO) mod tidy -compat=1.19
+		&& $(GO) mod tidy -compat=1.20
 
 .PHONY: lint-modules
 lint-modules: go-mod-tidy
 
 .PHONY: lint
-lint: misspell lint-modules golangci-lint
+lint: misspell lint-modules golangci-lint govulncheck
 
 .PHONY: vanity-import-check
 vanity-import-check: | $(PORTO)
@@ -226,6 +241,14 @@ vanity-import-check: | $(PORTO)
 misspell: | $(MISSPELL)
 	@$(MISSPELL) -w $(ALL_DOCS)
 
+.PHONY: govulncheck
+govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%)
+govulncheck/%: DIR=$*
+govulncheck/%: | $(GOVULNCHECK)
+	@echo "govulncheck ./... in $(DIR)" \
+		&& cd $(DIR) \
+		&& $(GOVULNCHECK) ./...
+
 .PHONY: codespell
 codespell: | $(CODESPELL)
 	@$(DOCKERPY) $(CODESPELL)
@@ -289,3 +312,7 @@ COMMIT ?= "HEAD"
 add-tags: | $(MULTIMOD)
 	@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
 	$(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
+
+.PHONY: lint-markdown
+lint-markdown:
+	docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
diff --git a/README.md b/README.md
index 652dd175590..2c5b0cc28ab 100644
--- a/README.md
+++ b/README.md
@@ -11,16 +11,13 @@ It provides a set of APIs to directly measure performance and behavior of your s
 
 ## Project Status
 
-| Signal  | Status     | Project               |
-|---------|------------|-----------------------|
-| Traces  | Stable     | N/A                   |
-| Metrics | Mixed [1]  | [Go: Metric SDK (GA)] |
-| Logs    | Frozen [2] | N/A                   |
+| Signal  | Status     |
+|---------|------------|
+| Traces  | Stable     |
+| Metrics | Stable     |
+| Logs    | Design [1] |
 
-[Go: Metric SDK (GA)]: https://github.com/orgs/open-telemetry/projects/34
-
-- [1]: [Metrics API](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is Stable. [Metrics SDK](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric) is Beta.
-- [2]: The Logs signal development is halted for this project while we stabilize the Metrics SDK.
+- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)).
    No Logs Pull Requests are currently being accepted.
 
 Progress and status specific to this repository is tracked in our
@@ -53,16 +50,16 @@ Currently, this project supports the following environments.
 
 | OS      | Go Version | Architecture |
 |---------|------------|--------------|
+| Ubuntu  | 1.21       | amd64        |
 | Ubuntu  | 1.20       | amd64        |
-| Ubuntu  | 1.19       | amd64        |
+| Ubuntu  | 1.21       | 386          |
 | Ubuntu  | 1.20       | 386          |
-| Ubuntu  | 1.19       | 386          |
+| MacOS   | 1.21       | amd64        |
 | MacOS   | 1.20       | amd64        |
-| MacOS   | 1.19       | amd64        |
+| Windows | 1.21       | amd64        |
 | Windows | 1.20       | amd64        |
-| Windows | 1.19       | amd64        |
+| Windows | 1.21       | 386          |
 | Windows | 1.20       | 386          |
-| Windows | 1.19       | 386          |
 
 While this project should work for other systems, no compatibility guarantees
 are made for those systems currently.
diff --git a/attribute/filter.go b/attribute/filter.go
new file mode 100644
index 00000000000..638c213d59a
--- /dev/null
+++ b/attribute/filter.go
@@ -0,0 +1,60 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Filter supports removing certain attributes from attribute sets. When
+// the filter returns true, the attribute will be kept in the filtered
+// attribute set. When the filter returns false, the attribute is excluded
+// from the filtered attribute set, and the attribute instead appears in
+// the removed list of excluded attributes.
+type Filter func(KeyValue) bool
+
+// NewAllowKeysFilter returns a Filter that only allows attributes with one of
+// the provided keys.
+//
+// If keys is empty a deny-all filter is returned.
+func NewAllowKeysFilter(keys ...Key) Filter {
+	if len(keys) <= 0 {
+		return func(kv KeyValue) bool { return false }
+	}
+
+	allowed := make(map[Key]struct{})
+	for _, k := range keys {
+		allowed[k] = struct{}{}
+	}
+	return func(kv KeyValue) bool {
+		_, ok := allowed[kv.Key]
+		return ok
+	}
+}
+
+// NewDenyKeysFilter returns a Filter that only allows attributes
+// that do not have one of the provided keys.
+//
+// If keys is empty an allow-all filter is returned.
+func NewDenyKeysFilter(keys ...Key) Filter {
+	if len(keys) <= 0 {
+		return func(kv KeyValue) bool { return true }
+	}
+
+	forbid := make(map[Key]struct{})
+	for _, k := range keys {
+		forbid[k] = struct{}{}
+	}
+	return func(kv KeyValue) bool {
+		_, ok := forbid[kv.Key]
+		return !ok
+	}
+}
diff --git a/attribute/filter_test.go b/attribute/filter_test.go
new file mode 100644
index 00000000000..c668e260b83
--- /dev/null
+++ b/attribute/filter_test.go
@@ -0,0 +1,87 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute
+
+import "testing"
+
+func TestNewAllowKeysFilter(t *testing.T) {
+	keys := []string{"zero", "one", "two"}
+	attrs := []KeyValue{Int(keys[0], 0), Int(keys[1], 1), Int(keys[2], 2)}
+
+	t.Run("Empty", func(t *testing.T) {
+		empty := NewAllowKeysFilter()
+		for _, kv := range attrs {
+			if empty(kv) {
+				t.Errorf("empty NewAllowKeysFilter filter accepted %v", kv)
+			}
+		}
+	})
+
+	t.Run("Partial", func(t *testing.T) {
+		partial := NewAllowKeysFilter(Key(keys[0]), Key(keys[1]))
+		for _, kv := range attrs[:2] {
+			if !partial(kv) {
+				t.Errorf("partial NewAllowKeysFilter filter denied %v", kv)
+			}
+		}
+		if partial(attrs[2]) {
+			t.Errorf("partial NewAllowKeysFilter filter accepted %v", attrs[2])
+		}
+	})
+
+	t.Run("Full", func(t *testing.T) {
+		full := NewAllowKeysFilter(Key(keys[0]), Key(keys[1]), Key(keys[2]))
+		for _, kv := range attrs {
+			if !full(kv) {
+				t.Errorf("full NewAllowKeysFilter filter denied %v", kv)
+			}
+		}
+	})
+}
+
+func TestNewDenyKeysFilter(t *testing.T) {
+	keys := []string{"zero", "one", "two"}
+	attrs := []KeyValue{Int(keys[0], 0), Int(keys[1], 1), Int(keys[2], 2)}
+
+	t.Run("Empty", func(t *testing.T) {
+		empty := NewDenyKeysFilter()
+		for _, kv := range attrs {
+			if !empty(kv) {
+				t.Errorf("empty NewDenyKeysFilter filter denied %v", kv)
+			}
+		}
+	})
+
+	t.Run("Partial", func(t *testing.T) {
+		partial := NewDenyKeysFilter(Key(keys[0]), Key(keys[1]))
+		for _, kv := range attrs[:2] {
+			if partial(kv) {
+				t.Errorf("partial NewDenyKeysFilter filter accepted %v", kv)
+			}
+		}
+		if !partial(attrs[2]) {
+			t.Errorf("partial NewDenyKeysFilter filter denied %v", attrs[2])
+		}
+	})
+
+	t.Run("Full", func(t *testing.T) {
+		full := NewDenyKeysFilter(Key(keys[0]), Key(keys[1]), Key(keys[2]))
+		for _, kv := range attrs {
+			if full(kv) {
+				t.Errorf("full NewDenyKeysFilter filter accepted %v", kv)
+			}
+		}
+	})
+}
diff --git a/attribute/key_test.go b/attribute/key_test.go
index 6d9f0fedd65..1c739714e0f 100644
--- a/attribute/key_test.go
+++ b/attribute/key_test.go
@@ -41,7 +41,7 @@ func TestDefined(t *testing.T) {
 		},
 	} {
 		t.Run(testcase.name, func(t *testing.T) {
-			//func (k attribute.Key) Defined() bool {
+			// func (k attribute.Key) Defined() bool {
 			have := testcase.k.Defined()
 			if have != testcase.want {
 				t.Errorf("Want: %v, but have: %v", testcase.want, have)
@@ -91,7 +91,7 @@ func TestEmit(t *testing.T) {
 		},
 	} {
 		t.Run(testcase.name, func(t *testing.T) {
-			//proto: func (v attribute.Value) Emit() string {
+			// proto: func (v attribute.Value) Emit() string {
 			have := testcase.v.Emit()
 			if have != testcase.want {
 				t.Errorf("Want: %s, but have: %s", testcase.want, have)
diff --git a/attribute/set.go b/attribute/set.go
index b976367e46d..9f9303d4f15 100644
--- a/attribute/set.go
+++ b/attribute/set.go
@@ -39,13 +39,6 @@ type (
 		iface interface{}
 	}
 
-	// Filter supports removing certain attributes from attribute sets. When
-	// the filter returns true, the attribute will be kept in the filtered
-	// attribute set. When the filter returns false, the attribute is excluded
-	// from the filtered attribute set, and the attribute instead appears in
-	// the removed list of excluded attributes.
-	Filter func(KeyValue) bool
-
 	// Sortable implements sort.Interface, used for sorting KeyValue. This is
 	// an exported type to support a memory optimization. A pointer to one of
 	// these is needed for the call to sort.Stable(), which the caller may
diff --git a/baggage/baggage.go b/baggage/baggage.go
index 9e6b3b7b52a..84532cb1da3 100644
--- a/baggage/baggage.go
+++ b/baggage/baggage.go
@@ -254,7 +254,7 @@ func NewMember(key, value string, props ...Property) (Member, error) {
 	if err := m.validate(); err != nil {
 		return newInvalidMember(), err
 	}
-	decodedValue, err := url.QueryUnescape(value)
+	decodedValue, err := url.PathUnescape(value)
 	if err != nil {
 		return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
 	}
@@ -301,7 +301,7 @@ func parseMember(member string) (Member, error) {
 	// when converting the header into a data structure."
 	key = strings.TrimSpace(k)
 	var err error
-	value, err = url.QueryUnescape(strings.TrimSpace(v))
+	value, err = url.PathUnescape(strings.TrimSpace(v))
 	if err != nil {
 		return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
 	}
diff --git a/baggage/baggage_test.go b/baggage/baggage_test.go
index 2b98beace10..4bac6707ea0 100644
--- a/baggage/baggage_test.go
+++ b/baggage/baggage_test.go
@@ -275,6 +275,48 @@ func TestBaggageParse(t *testing.T) {
 				"foo": {Value: "1"},
 			},
 		},
+		{
+			name: "single member no properties plus",
+			in:   "foo=1+1",
+			want: baggage.List{
+				"foo": {Value: "1+1"},
+			},
+		},
+		{
+			name: "single member no properties plus encoded",
+			in:   "foo=1%2B1",
+			want: baggage.List{
+				"foo": {Value: "1+1"},
+			},
+		},
+		{
+			name: "single member no properties slash",
+			in:   "foo=1/1",
+			want: baggage.List{
+				"foo": {Value: "1/1"},
+			},
+		},
+		{
+			name: "single member no properties slash encoded",
+			in:   "foo=1%2F1",
+			want: baggage.List{
+				"foo": {Value: "1/1"},
+			},
+		},
+		{
+			name: "single member no properties equals",
+			in:   "foo=1=1",
+			want: baggage.List{
+				"foo": {Value: "1=1"},
+			},
+		},
+		{
+			name: "single member no properties equals encoded",
+			in:   "foo=1%3D1",
+			want: baggage.List{
+				"foo": {Value: "1=1"},
+			},
+		},
 		{
 			name: "single member with spaces",
 			in:   " foo \t= 1\t\t ",
@@ -440,6 +482,13 @@ func TestBaggageString(t *testing.T) {
 				"foo": {Value: "1=1"},
 			},
 		},
+		{
+			name: "plus",
+			out:  "foo=1%2B1",
+			baggage: baggage.List{
+				"foo": {Value: "1+1"},
+			},
+		},
 		{
 			name: "single member empty value with properties",
 			out:  "foo=;red;state=on",
diff --git a/bridge/opencensus/README.md b/bridge/opencensus/README.md
deleted file mode 100644
index 3df9dc7eb07..00000000000
--- a/bridge/opencensus/README.md
+++ /dev/null
@@ -1,81 +0,0 @@
-# OpenCensus Bridge
-
-The OpenCensus Bridge helps facilitate the migration of an application from OpenCensus to OpenTelemetry.
-
-## Caveat about OpenCensus
-
-Installing a metric or tracing bridge will cause OpenCensus telemetry to be exported by OpenTelemetry exporters.  Since OpenCensus telemetry uses globals, installing a bridge will result in telemetry collection from _all_ libraries that use OpenCensus, including some you may not expect.  For example ([#1928](https://github.com/open-telemetry/opentelemetry-go/issues/1928)), if a client library generates traces with OpenCensus, installing the bridge will cause those traces to be exported by OpenTelemetry.
-
-## Tracing
-
-### The Problem: Mixing OpenCensus and OpenTelemetry libraries
-
-In a perfect world, one would simply migrate their entire go application --including custom instrumentation, libraries, and exporters-- from OpenCensus to OpenTelemetry all at once.  In the real world, dependency constraints, third-party ownership of libraries, or other reasons may require mixing OpenCensus and OpenTelemetry libraries in a single application.
-
-However, if you create the following spans in a go application:
-
-```go
-ctx, ocSpan := opencensus.StartSpan(context.Background(), "OuterSpan")
-defer ocSpan.End()
-ctx, otSpan := opentelemetryTracer.Start(ctx, "MiddleSpan")
-defer otSpan.End()
-ctx, ocSpan := opencensus.StartSpan(ctx, "InnerSpan")
-defer ocSpan.End()
-```
-
-OpenCensus reports (to OpenCensus exporters):
-
-```
-[--------OuterSpan------------]
-    [----InnerSpan------]
-```
-
-OpenTelemetry reports (to OpenTelemetry exporters):
-
-```
-   [-----MiddleSpan--------]
-```
-
-Instead, I would prefer (to a single set of exporters):
-
-```
-[--------OuterSpan------------]
-   [-----MiddleSpan--------]
-    [----InnerSpan------]
-```
-
-### The bridge solution
-
-The bridge implements the OpenCensus trace API using OpenTelemetry.  This would cause, for example, a span recorded with OpenCensus' `StartSpan()` method to be equivalent to recording a span using OpenTelemetry's `tracer.Start()` method.  Funneling all tracing API calls to OpenTelemetry APIs results in the desired unified span hierarchy.
-
-### User Journey
-
-Starting from an application using entirely OpenCensus APIs:
-
-1. Instantiate OpenTelemetry SDK and Exporters
-2. Override OpenCensus' DefaultTracer with the bridge
-3. Migrate libraries individually from OpenCensus to OpenTelemetry
-4. Remove OpenCensus exporters and configuration
-
-To override OpenCensus' DefaultTracer with the bridge:
-
-```go
-import (
-	octrace "go.opencensus.io/trace"
-	"go.opentelemetry.io/otel/bridge/opencensus"
-	"go.opentelemetry.io/otel"
-)
-
-tracer := otel.GetTracerProvider().Tracer("bridge")
-octrace.DefaultTracer = opencensus.NewTracer(tracer)
-```
-
-Be sure to set the `Tracer` name to your instrumentation package name instead of `"bridge"`.
-
-#### Incompatibilities
-
-OpenCensus and OpenTelemetry APIs are not entirely compatible.  If the bridge finds any incompatibilities, it will log them.  Incompatibilities include:
-
-* Custom OpenCensus Samplers specified during StartSpan are ignored.
-* Links cannot be added to OpenCensus spans.
-* OpenTelemetry Debug or Deferred trace flags are dropped after an OpenCensus span is created.
diff --git a/bridge/opencensus/config.go b/bridge/opencensus/config.go
new file mode 100644
index 00000000000..26c94742fb1
--- /dev/null
+++ b/bridge/opencensus/config.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus"
+
+import (
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/trace"
+)
+
+const scopeName = "go.opentelemetry.io/otel/bridge/opencensus"
+
+// newTraceConfig returns a config configured with options.
+func newTraceConfig(options []TraceOption) traceConfig {
+	conf := traceConfig{tp: otel.GetTracerProvider()}
+	for _, o := range options {
+		conf = o.apply(conf)
+	}
+	return conf
+}
+
+type traceConfig struct {
+	tp trace.TracerProvider
+}
+
+// TraceOption applies a configuration option value to an OpenCensus bridge
+// Tracer.
+type TraceOption interface {
+	apply(traceConfig) traceConfig
+}
+
+// traceOptionFunc applies a set of options to a config.
+type traceOptionFunc func(traceConfig) traceConfig
+
+// apply returns a config with option(s) applied.
+func (o traceOptionFunc) apply(conf traceConfig) traceConfig {
+	return o(conf)
+}
+
+// WithTracerProvider specifies a tracer provider to use for creating a tracer.
+func WithTracerProvider(tp trace.TracerProvider) TraceOption {
+	return traceOptionFunc(func(conf traceConfig) traceConfig {
+		conf.tp = tp
+		return conf
+	})
+}
+
+type metricConfig struct{}
+
+// MetricOption applies a configuration option value to an OpenCensus bridge
+// MetricProducer.
+type MetricOption interface {
+	apply(metricConfig) metricConfig
+}
diff --git a/bridge/opencensus/config_test.go b/bridge/opencensus/config_test.go
new file mode 100644
index 00000000000..5a2011f0c98
--- /dev/null
+++ b/bridge/opencensus/config_test.go
@@ -0,0 +1,56 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus"
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/trace/noop"
+)
+
+func TestNewTraceConfig(t *testing.T) {
+	globalTP := noop.NewTracerProvider()
+	customTP := noop.NewTracerProvider()
+	otel.SetTracerProvider(globalTP)
+	for _, tc := range []struct {
+		desc     string
+		opts     []TraceOption
+		expected traceConfig
+	}{
+		{
+			desc: "default",
+			expected: traceConfig{
+				tp: globalTP,
+			},
+		},
+		{
+			desc: "overridden",
+			opts: []TraceOption{
+				WithTracerProvider(customTP),
+			},
+			expected: traceConfig{
+				tp: customTP,
+			},
+		},
+	} {
+		t.Run(tc.desc, func(t *testing.T) {
+			cfg := newTraceConfig(tc.opts)
+			assert.Equal(t, tc.expected, cfg)
+		})
+	}
+}
diff --git a/bridge/opencensus/doc.go b/bridge/opencensus/doc.go
index 80d80da6f78..70990920474 100644
--- a/bridge/opencensus/doc.go
+++ b/bridge/opencensus/doc.go
@@ -13,23 +13,49 @@
 // limitations under the License.
 
 // Package opencensus provides a migration bridge from OpenCensus to
-// OpenTelemetry. The NewTracer function should be used to create an
-// OpenCensus Tracer from an OpenTelemetry Tracer. This Tracer can be use in
-// place of any existing OpenCensus Tracer and will generate OpenTelemetry
-// spans for traces. These spans will be exported by the OpenTelemetry
-// TracerProvider the original OpenTelemetry Tracer came from.
+// OpenTelemetry for metrics and traces. The bridge incorporates metrics and
+// traces from OpenCensus into the OpenTelemetry SDK, combining them with
+// metrics and traces from OpenTelemetry instrumentation.
 //
-// There are known limitations to this bridge:
+// # Migration Guide
 //
-// - The AddLink method for OpenCensus Spans is not compatible with the
-// OpenTelemetry Span. No link can be added to an OpenTelemetry Span once it
-// is started. Any calls to this method for the OpenCensus Span will result
-// in an error being sent to the OpenTelemetry default ErrorHandler.
+// For most applications, it would be difficult to migrate an application
+// from OpenCensus to OpenTelemetry all-at-once. Libraries used by the
+// application may still be using OpenCensus, and the application itself may
+// have many lines of instrumentation.
 //
-// - The NewContext method of the OpenCensus Tracer cannot embed an OpenCensus
-// Span in a context unless that Span was created by that Tracer.
+// Bridges help in this situation by allowing your application to have "mixed"
+// instrumentation, while incorporating all instrumentation into a single
+// export path. To migrate with bridges, a user would:
 //
-// - Conversion of custom OpenCensus Samplers to OpenTelemetry is not
-// implemented. An error will be sent to the OpenTelemetry default
-// ErrorHandler if this is attempted.
+//  1. Configure the OpenTelemetry SDK for metrics and traces, with the OpenTelemetry exporters matching to your current OpenCensus exporters.
+//  2. Install this OpenCensus bridge, which sends OpenCensus telemetry to your new OpenTelemetry exporters.
+//  3. Over time, migrate your instrumentation from OpenCensus to OpenTelemetry.
+//  4. Once all instrumentation is migrated, remove the OpenCensus bridge.
+//
+// With this approach, you can migrate your telemetry, including in dependent
+// libraries over time without disruption.
+//
+// # Warnings
+//
+// Installing a metric or tracing bridge will cause OpenCensus telemetry to be
+// exported by OpenTelemetry exporters. Since OpenCensus telemetry uses globals,
+// installing a bridge will result in telemetry collection from _all_ libraries
+// that use OpenCensus, including some you may not expect, such as the
+// telemetry exporter itself.
+//
+// # Limitations
+//
+// There are known limitations to the trace bridge:
+//
+//   - The AddLink method for OpenCensus Spans is ignored, and an error is sent
+//     to the OpenTelemetry ErrorHandler.
+//   - The NewContext method of the OpenCensus Tracer cannot embed an OpenCensus
+//     Span in a context unless that Span was created by that Tracer.
+//   - Conversion of custom OpenCensus Samplers to OpenTelemetry is not
+//     implemented, and an error will be sent to the OpenTelemetry ErrorHandler.
+//
+// There are known limitations to the metric bridge:
+//   - GaugeDistribution-typed metrics are dropped
+//   - Histogram's SumOfSquaredDeviation field is dropped
 package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus"
diff --git a/bridge/opencensus/example_test.go b/bridge/opencensus/example_test.go
new file mode 100644
index 00000000000..3fa3b62179c
--- /dev/null
+++ b/bridge/opencensus/example_test.go
@@ -0,0 +1,33 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package opencensus_test
+
+import (
+	"go.opentelemetry.io/otel/bridge/opencensus"
+	"go.opentelemetry.io/otel/sdk/metric"
+)
+
+func ExampleNewMetricProducer() {
+	// Create the OpenCensus Metric bridge.
+	bridge := opencensus.NewMetricProducer()
+	// Add the bridge as a producer to your reader.
+	// If using a push exporter, such as OTLP exporter,
+	// use metric.NewPeriodicReader with metric.WithProducer option.
+	// If using a pull exporter which acts as a reader, such as prometheus exporter,
+	// use a dedicated option like prometheus.WithProducer.
+	reader := metric.NewManualReader(metric.WithProducer(bridge))
+	// Add the reader to your MeterProvider.
+	_ = metric.NewMeterProvider(metric.WithReader(reader))
+}
diff --git a/bridge/opencensus/go.mod b/bridge/opencensus/go.mod
index b650a2ac01a..da514cd9ec8 100644
--- a/bridge/opencensus/go.mod
+++ b/bridge/opencensus/go.mod
@@ -1,25 +1,25 @@
 module go.opentelemetry.io/otel/bridge/opencensus
 
-go 1.19
+go 1.20
 
 require (
 	github.com/stretchr/testify v1.8.4
 	go.opencensus.io v0.24.0
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/sdk/metric v0.39.0
-	go.opentelemetry.io/otel/trace v1.16.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/sdk/metric v1.21.0
+	go.opentelemetry.io/otel/trace v1.21.0
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
 	github.com/kr/pretty v0.1.0 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
 	gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
diff --git a/bridge/opencensus/go.sum b/bridge/opencensus/go.sum
index 04674f850c3..92b795a117d 100644
--- a/bridge/opencensus/go.sum
+++ b/bridge/opencensus/go.sum
@@ -11,8 +11,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -34,7 +34,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -74,8 +74,8 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/bridge/opencensus/internal/ocmetric/metric.go b/bridge/opencensus/internal/ocmetric/metric.go
index 3869d318cfb..8fdd0eb6167 100644
--- a/bridge/opencensus/internal/ocmetric/metric.go
+++ b/bridge/opencensus/internal/ocmetric/metric.go
@@ -17,35 +17,38 @@ package internal // import "go.opentelemetry.io/otel/bridge/opencensus/internal/
 import (
 	"errors"
 	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
 
 	ocmetricdata "go.opencensus.io/metric/metricdata"
+	octrace "go.opencensus.io/trace"
 
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
 )
 
 var (
-	errConversion                   = errors.New("converting from OpenCensus to OpenTelemetry")
 	errAggregationType              = errors.New("unsupported OpenCensus aggregation type")
 	errMismatchedValueTypes         = errors.New("wrong value type for data point")
-	errNumberDataPoint              = errors.New("converting a number data point")
-	errHistogramDataPoint           = errors.New("converting a histogram data point")
-	errNegativeDistributionCount    = errors.New("distribution count is negative")
+	errNegativeCount                = errors.New("distribution or summary count is negative")
 	errNegativeBucketCount          = errors.New("distribution bucket count is negative")
 	errMismatchedAttributeKeyValues = errors.New("mismatched number of attribute keys and values")
+	errInvalidExemplarSpanContext   = errors.New("span context exemplar attachment does not contain an OpenCensus SpanContext")
 )
 
 // ConvertMetrics converts metric data from OpenCensus to OpenTelemetry.
 func ConvertMetrics(ocmetrics []*ocmetricdata.Metric) ([]metricdata.Metrics, error) {
 	otelMetrics := make([]metricdata.Metrics, 0, len(ocmetrics))
-	var errInfo []string
+	var err error
 	for _, ocm := range ocmetrics {
 		if ocm == nil {
 			continue
 		}
-		agg, err := convertAggregation(ocm)
-		if err != nil {
-			errInfo = append(errInfo, err.Error())
+		agg, aggregationErr := convertAggregation(ocm)
+		if aggregationErr != nil {
+			err = errors.Join(err, fmt.Errorf("error converting metric %v: %w", ocm.Descriptor.Name, aggregationErr))
 			continue
 		}
 		otelMetrics = append(otelMetrics, metricdata.Metrics{
@@ -55,11 +58,10 @@ func ConvertMetrics(ocmetrics []*ocmetricdata.Metric) ([]metricdata.Metrics, err
 			Data:        agg,
 		})
 	}
-	var aggregatedError error
-	if len(errInfo) > 0 {
-		aggregatedError = fmt.Errorf("%w: %q", errConversion, errInfo)
+	if err != nil {
+		return otelMetrics, fmt.Errorf("error converting from OpenCensus to OpenTelemetry: %w", err)
 	}
-	return otelMetrics, aggregatedError
+	return otelMetrics, nil
 }
 
 // convertAggregation produces an aggregation based on the OpenCensus Metric.
@@ -76,7 +78,8 @@ func convertAggregation(metric *ocmetricdata.Metric) (metricdata.Aggregation, er
 		return convertSum[float64](labelKeys, metric.TimeSeries)
 	case ocmetricdata.TypeCumulativeDistribution:
 		return convertHistogram(labelKeys, metric.TimeSeries)
-		// TODO: Support summaries, once it is in the OTel data types.
+	case ocmetricdata.TypeSummary:
+		return convertSummary(labelKeys, metric.TimeSeries)
 	}
 	return nil, fmt.Errorf("%w: %q", errAggregationType, metric.Descriptor.Type)
 }
@@ -97,17 +100,17 @@ func convertSum[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocme
 // convertNumberDataPoints converts OpenCensus TimeSeries to OpenTelemetry DataPoints.
 func convertNumberDataPoints[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) ([]metricdata.DataPoint[N], error) {
 	var points []metricdata.DataPoint[N]
-	var errInfo []string
+	var err error
 	for _, t := range ts {
-		attrs, err := convertAttrs(labelKeys, t.LabelValues)
-		if err != nil {
-			errInfo = append(errInfo, err.Error())
+		attrs, attrsErr := convertAttrs(labelKeys, t.LabelValues)
+		if attrsErr != nil {
+			err = errors.Join(err, attrsErr)
 			continue
 		}
 		for _, p := range t.Points {
 			v, ok := p.Value.(N)
 			if !ok {
-				errInfo = append(errInfo, fmt.Sprintf("%v: %q", errMismatchedValueTypes, p.Value))
+				err = errors.Join(err, fmt.Errorf("%w: %q", errMismatchedValueTypes, p.Value))
 				continue
 			}
 			points = append(points, metricdata.DataPoint[N]{
@@ -118,40 +121,35 @@ func convertNumberDataPoints[N int64 | float64](labelKeys []ocmetricdata.LabelKe
 			})
 		}
 	}
-	var aggregatedError error
-	if len(errInfo) > 0 {
-		aggregatedError = fmt.Errorf("%w: %v", errNumberDataPoint, errInfo)
-	}
-	return points, aggregatedError
+	return points, err
 }
 
 // convertHistogram converts OpenCensus Distribution timeseries to an
 // OpenTelemetry Histogram aggregation.
 func convertHistogram(labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Histogram[float64], error) {
 	points := make([]metricdata.HistogramDataPoint[float64], 0, len(ts))
-	var errInfo []string
+	var err error
 	for _, t := range ts {
-		attrs, err := convertAttrs(labelKeys, t.LabelValues)
-		if err != nil {
-			errInfo = append(errInfo, err.Error())
+		attrs, attrsErr := convertAttrs(labelKeys, t.LabelValues)
+		if attrsErr != nil {
+			err = errors.Join(err, attrsErr)
 			continue
 		}
 		for _, p := range t.Points {
 			dist, ok := p.Value.(*ocmetricdata.Distribution)
 			if !ok {
-				errInfo = append(errInfo, fmt.Sprintf("%v: %d", errMismatchedValueTypes, p.Value))
+				err = errors.Join(err, fmt.Errorf("%w: %d", errMismatchedValueTypes, p.Value))
 				continue
 			}
-			bucketCounts, err := convertBucketCounts(dist.Buckets)
-			if err != nil {
-				errInfo = append(errInfo, err.Error())
+			bucketCounts, exemplars, bucketErr := convertBuckets(dist.Buckets)
+			if bucketErr != nil {
+				err = errors.Join(err, bucketErr)
 				continue
 			}
 			if dist.Count < 0 {
-				errInfo = append(errInfo, fmt.Sprintf("%v: %d", errNegativeDistributionCount, dist.Count))
+				err = errors.Join(err, fmt.Errorf("%w: %d", errNegativeCount, dist.Count))
 				continue
 			}
-			// TODO: handle exemplars
 			points = append(points, metricdata.HistogramDataPoint[float64]{
 				Attributes:   attrs,
 				StartTime:    t.StartTime,
@@ -160,28 +158,248 @@ func convertHistogram(labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.Time
 				Sum:          dist.Sum,
 				Bounds:       dist.BucketOptions.Bounds,
 				BucketCounts: bucketCounts,
+				Exemplars:    exemplars,
 			})
 		}
 	}
-	var aggregatedError error
-	if len(errInfo) > 0 {
-		aggregatedError = fmt.Errorf("%w: %v", errHistogramDataPoint, errInfo)
-	}
-	return metricdata.Histogram[float64]{DataPoints: points, Temporality: metricdata.CumulativeTemporality}, aggregatedError
+	return metricdata.Histogram[float64]{DataPoints: points, Temporality: metricdata.CumulativeTemporality}, err
 }
 
-// convertBucketCounts converts from OpenCensus bucket counts to slice of uint64.
-func convertBucketCounts(buckets []ocmetricdata.Bucket) ([]uint64, error) {
+// convertBuckets converts from OpenCensus bucket counts to slice of uint64,
+// and converts OpenCensus exemplars to OpenTelemetry exemplars.
+func convertBuckets(buckets []ocmetricdata.Bucket) ([]uint64, []metricdata.Exemplar[float64], error) {
 	bucketCounts := make([]uint64, len(buckets))
+	exemplars := []metricdata.Exemplar[float64]{}
+	var err error
 	for i, bucket := range buckets {
 		if bucket.Count < 0 {
-			return nil, fmt.Errorf("%w: %q", errNegativeBucketCount, bucket.Count)
+			err = errors.Join(err, fmt.Errorf("%w: %q", errNegativeBucketCount, bucket.Count))
+			continue
 		}
 		bucketCounts[i] = uint64(bucket.Count)
+
+		if bucket.Exemplar != nil {
+			exemplar, exemplarErr := convertExemplar(bucket.Exemplar)
+			if exemplarErr != nil {
+				err = errors.Join(err, exemplarErr)
+				continue
+			}
+			exemplars = append(exemplars, exemplar)
+		}
+	}
+	return bucketCounts, exemplars, err
+}
+
+// convertExemplar converts an OpenCensus exemplar to an OpenTelemetry exemplar.
+func convertExemplar(ocExemplar *ocmetricdata.Exemplar) (metricdata.Exemplar[float64], error) {
+	exemplar := metricdata.Exemplar[float64]{
+		Value: ocExemplar.Value,
+		Time:  ocExemplar.Timestamp,
+	}
+	var err error
+	for k, v := range ocExemplar.Attachments {
+		switch {
+		case k == ocmetricdata.AttachmentKeySpanContext:
+			sc, ok := v.(octrace.SpanContext)
+			if !ok {
+				err = errors.Join(err, fmt.Errorf("%w; type: %v", errInvalidExemplarSpanContext, reflect.TypeOf(v)))
+				continue
+			}
+			exemplar.SpanID = sc.SpanID[:]
+			exemplar.TraceID = sc.TraceID[:]
+		default:
+			exemplar.FilteredAttributes = append(exemplar.FilteredAttributes, convertKV(k, v))
+		}
+	}
+	sortable := attribute.Sortable(exemplar.FilteredAttributes)
+	sort.Sort(&sortable)
+	return exemplar, err
+}
+
+// convertKV converts an OpenCensus Attachment to an OpenTelemetry KeyValue.
+func convertKV(key string, value any) attribute.KeyValue {
+	switch typedVal := value.(type) {
+	case bool:
+		return attribute.Bool(key, typedVal)
+	case int:
+		return attribute.Int(key, typedVal)
+	case int8:
+		return attribute.Int(key, int(typedVal))
+	case int16:
+		return attribute.Int(key, int(typedVal))
+	case int32:
+		return attribute.Int(key, int(typedVal))
+	case int64:
+		return attribute.Int64(key, typedVal)
+	case uint:
+		return uintKV(key, typedVal)
+	case uint8:
+		return uintKV(key, uint(typedVal))
+	case uint16:
+		return uintKV(key, uint(typedVal))
+	case uint32:
+		return uintKV(key, uint(typedVal))
+	case uintptr:
+		return uint64KV(key, uint64(typedVal))
+	case uint64:
+		return uint64KV(key, uint64(typedVal))
+	case float32:
+		return attribute.Float64(key, float64(typedVal))
+	case float64:
+		return attribute.Float64(key, typedVal)
+	case complex64:
+		return attribute.String(key, complexToString(typedVal))
+	case complex128:
+		return attribute.String(key, complexToString(typedVal))
+	case string:
+		return attribute.String(key, typedVal)
+	case []bool:
+		return attribute.BoolSlice(key, typedVal)
+	case []int:
+		return attribute.IntSlice(key, typedVal)
+	case []int8:
+		return intSliceKV(key, typedVal)
+	case []int16:
+		return intSliceKV(key, typedVal)
+	case []int32:
+		return intSliceKV(key, typedVal)
+	case []int64:
+		return attribute.Int64Slice(key, typedVal)
+	case []uint:
+		return uintSliceKV(key, typedVal)
+	case []uint8:
+		return uintSliceKV(key, typedVal)
+	case []uint16:
+		return uintSliceKV(key, typedVal)
+	case []uint32:
+		return uintSliceKV(key, typedVal)
+	case []uintptr:
+		return uintSliceKV(key, typedVal)
+	case []uint64:
+		return uintSliceKV(key, typedVal)
+	case []float32:
+		floatSlice := make([]float64, len(typedVal))
+		for i := range typedVal {
+			floatSlice[i] = float64(typedVal[i])
+		}
+		return attribute.Float64Slice(key, floatSlice)
+	case []float64:
+		return attribute.Float64Slice(key, typedVal)
+	case []complex64:
+		return complexSliceKV(key, typedVal)
+	case []complex128:
+		return complexSliceKV(key, typedVal)
+	case []string:
+		return attribute.StringSlice(key, typedVal)
+	case fmt.Stringer:
+		return attribute.Stringer(key, typedVal)
+	default:
+		return attribute.String(key, fmt.Sprintf("unhandled attribute value: %+v", value))
+	}
+}
+
+func intSliceKV[N int8 | int16 | int32](key string, val []N) attribute.KeyValue {
+	intSlice := make([]int, len(val))
+	for i := range val {
+		intSlice[i] = int(val[i])
+	}
+	return attribute.IntSlice(key, intSlice)
+}
+
+func uintKV(key string, val uint) attribute.KeyValue {
+	if val > uint(math.MaxInt) {
+		return attribute.String(key, strconv.FormatUint(uint64(val), 10))
+	}
+	return attribute.Int(key, int(val))
+}
+
+func uintSliceKV[N uint | uint8 | uint16 | uint32 | uint64 | uintptr](key string, val []N) attribute.KeyValue {
+	strSlice := make([]string, len(val))
+	for i := range val {
+		strSlice[i] = strconv.FormatUint(uint64(val[i]), 10)
 	}
-	return bucketCounts, nil
+	return attribute.StringSlice(key, strSlice)
 }
 
+func uint64KV(key string, val uint64) attribute.KeyValue {
+	const maxInt64 = ^uint64(0) >> 1
+	if val > maxInt64 {
+		return attribute.String(key, strconv.FormatUint(val, 10))
+	}
+	return attribute.Int64(key, int64(val))
+}
+
+func complexSliceKV[N complex64 | complex128](key string, val []N) attribute.KeyValue {
+	strSlice := make([]string, len(val))
+	for i := range val {
+		strSlice[i] = complexToString(val[i])
+	}
+	return attribute.StringSlice(key, strSlice)
+}
+
+func complexToString[N complex64 | complex128](val N) string {
+	return strconv.FormatComplex(complex128(val), 'f', -1, 64)
+}
+
+// convertSummary converts OpenCensus Summary timeseries to an
+// OpenTelemetry Summary.
+func convertSummary(labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Summary, error) {
+	points := make([]metricdata.SummaryDataPoint, 0, len(ts))
+	var err error
+	for _, t := range ts {
+		attrs, attrErr := convertAttrs(labelKeys, t.LabelValues)
+		if attrErr != nil {
+			err = errors.Join(err, attrErr)
+			continue
+		}
+		for _, p := range t.Points {
+			summary, ok := p.Value.(*ocmetricdata.Summary)
+			if !ok {
+				err = errors.Join(err, fmt.Errorf("%w: %d", errMismatchedValueTypes, p.Value))
+				continue
+			}
+			if summary.Count < 0 {
+				err = errors.Join(err, fmt.Errorf("%w: %d", errNegativeCount, summary.Count))
+				continue
+			}
+			point := metricdata.SummaryDataPoint{
+				Attributes:     attrs,
+				StartTime:      t.StartTime,
+				Time:           p.Time,
+				Count:          uint64(summary.Count),
+				QuantileValues: convertQuantiles(summary.Snapshot),
+				Sum:            summary.Sum,
+			}
+			points = append(points, point)
+		}
+	}
+	return metricdata.Summary{DataPoints: points}, err
+}
+
+// convertQuantiles converts an OpenCensus summary snapshot to
+// OpenTelemetry quantiles.
+func convertQuantiles(snapshot ocmetricdata.Snapshot) []metricdata.QuantileValue {
+	quantileValues := make([]metricdata.QuantileValue, 0, len(snapshot.Percentiles))
+	for quantile, value := range snapshot.Percentiles {
+		quantileValues = append(quantileValues, metricdata.QuantileValue{
+			// OpenCensus quantiles are range (0-100.0], but OpenTelemetry
+			// quantiles are range [0.0, 1.0].
+			Quantile: quantile / 100.0,
+			Value:    value,
+		})
+	}
+	sort.Sort(byQuantile(quantileValues))
+	return quantileValues
+}
+
+// byQuantile implements sort.Interface for []metricdata.QuantileValue
+// based on the Quantile field.
+type byQuantile []metricdata.QuantileValue
+
+func (a byQuantile) Len() int           { return len(a) }
+func (a byQuantile) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byQuantile) Less(i, j int) bool { return a[i].Quantile < a[j].Quantile }
+
 // convertAttrs converts from OpenCensus attribute keys and values to an
 // OpenTelemetry attribute Set.
 func convertAttrs(keys []ocmetricdata.LabelKey, values []ocmetricdata.LabelValue) (attribute.Set, error) {
diff --git a/bridge/opencensus/internal/ocmetric/metric_test.go b/bridge/opencensus/internal/ocmetric/metric_test.go
index 0cbb217f293..b7a25e5c776 100644
--- a/bridge/opencensus/internal/ocmetric/metric_test.go
+++ b/bridge/opencensus/internal/ocmetric/metric_test.go
@@ -16,10 +16,15 @@ package internal // import "go.opentelemetry.io/otel/bridge/opencensus/opencensu
 
 import (
 	"errors"
+	"fmt"
+	"math"
+	"reflect"
 	"testing"
 	"time"
 
+	"github.com/stretchr/testify/assert"
 	ocmetricdata "go.opencensus.io/metric/metricdata"
+	octrace "go.opencensus.io/trace"
 
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
@@ -28,6 +33,7 @@ import (
 
 func TestConvertMetrics(t *testing.T) {
 	endTime1 := time.Now()
+	exemplarTime := endTime1.Add(-10 * time.Second)
 	endTime2 := endTime1.Add(-time.Millisecond)
 	startTime := endTime2.Add(-time.Minute)
 	for _, tc := range []struct {
@@ -41,7 +47,7 @@ func TestConvertMetrics(t *testing.T) {
 			expected: []metricdata.Metrics{},
 		},
 		{
-			desc: "normal Histogram, gauges, and sums",
+			desc: "normal Histogram, summary, gauges, and sums",
 			input: []*ocmetricdata.Metric{
 				{
 					Descriptor: ocmetricdata.Descriptor{
@@ -56,7 +62,6 @@ func TestConvertMetrics(t *testing.T) {
 					},
 					TimeSeries: []*ocmetricdata.TimeSeries{
 						{
-
 							LabelValues: []ocmetricdata.LabelValue{
 								{
 									Value:   "hello",
@@ -74,9 +79,46 @@ func TestConvertMetrics(t *testing.T) {
 										Bounds: []float64{1.0, 2.0, 3.0},
 									},
 									Buckets: []ocmetricdata.Bucket{
-										{Count: 1},
-										{Count: 2},
-										{Count: 5},
+										{
+											Count: 1,
+											Exemplar: &ocmetricdata.Exemplar{
+												Value:     0.8,
+												Timestamp: exemplarTime,
+												Attachments: map[string]interface{}{
+													ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{
+														TraceID: octrace.TraceID([16]byte{1}),
+														SpanID:  octrace.SpanID([8]byte{2}),
+													},
+													"bool": true,
+												},
+											},
+										},
+										{
+											Count: 2,
+											Exemplar: &ocmetricdata.Exemplar{
+												Value:     1.5,
+												Timestamp: exemplarTime,
+												Attachments: map[string]interface{}{
+													ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{
+														TraceID: octrace.TraceID([16]byte{3}),
+														SpanID:  octrace.SpanID([8]byte{4}),
+													},
+												},
+											},
+										},
+										{
+											Count: 5,
+											Exemplar: &ocmetricdata.Exemplar{
+												Value:     2.6,
+												Timestamp: exemplarTime,
+												Attachments: map[string]interface{}{
+													ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{
+														TraceID: octrace.TraceID([16]byte{5}),
+														SpanID:  octrace.SpanID([8]byte{6}),
+													},
+												},
+											},
+										},
 									},
 								}),
 								ocmetricdata.NewDistributionPoint(endTime2, &ocmetricdata.Distribution{
@@ -86,9 +128,45 @@ func TestConvertMetrics(t *testing.T) {
 										Bounds: []float64{1.0, 2.0, 3.0},
 									},
 									Buckets: []ocmetricdata.Bucket{
-										{Count: 1},
-										{Count: 4},
-										{Count: 5},
+										{
+											Count: 1,
+											Exemplar: &ocmetricdata.Exemplar{
+												Value:     0.9,
+												Timestamp: exemplarTime,
+												Attachments: map[string]interface{}{
+													ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{
+														TraceID: octrace.TraceID([16]byte{7}),
+														SpanID:  octrace.SpanID([8]byte{8}),
+													},
+												},
+											},
+										},
+										{
+											Count: 4,
+											Exemplar: &ocmetricdata.Exemplar{
+												Value:     1.1,
+												Timestamp: exemplarTime,
+												Attachments: map[string]interface{}{
+													ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{
+														TraceID: octrace.TraceID([16]byte{9}),
+														SpanID:  octrace.SpanID([8]byte{10}),
+													},
+												},
+											},
+										},
+										{
+											Count: 5,
+											Exemplar: &ocmetricdata.Exemplar{
+												Value:     2.7,
+												Timestamp: exemplarTime,
+												Attachments: map[string]interface{}{
+													ocmetricdata.AttachmentKeySpanContext: octrace.SpanContext{
+														TraceID: octrace.TraceID([16]byte{11}),
+														SpanID:  octrace.SpanID([8]byte{12}),
+													},
+												},
+											},
+										},
 									},
 								}),
 							},
@@ -207,6 +285,54 @@ func TestConvertMetrics(t *testing.T) {
 							},
 						},
 					},
+				}, {
+					Descriptor: ocmetricdata.Descriptor{
+						Name:        "foo.com/summary-a",
+						Description: "a testing summary",
+						Unit:        ocmetricdata.UnitMilliseconds,
+						Type:        ocmetricdata.TypeSummary,
+						LabelKeys: []ocmetricdata.LabelKey{
+							{Key: "g"},
+							{Key: "h"},
+						},
+					},
+					TimeSeries: []*ocmetricdata.TimeSeries{
+						{
+							LabelValues: []ocmetricdata.LabelValue{
+								{
+									Value:   "ding",
+									Present: true,
+								}, {
+									Value:   "dong",
+									Present: true,
+								},
+							},
+							Points: []ocmetricdata.Point{
+								ocmetricdata.NewSummaryPoint(endTime1, &ocmetricdata.Summary{
+									Count:          10,
+									Sum:            13.2,
+									HasCountAndSum: true,
+									Snapshot: ocmetricdata.Snapshot{
+										Percentiles: map[float64]float64{
+											50.0:  1.0,
+											0.0:   0.1,
+											100.0: 10.4,
+										},
+									},
+								}),
+								ocmetricdata.NewSummaryPoint(endTime2, &ocmetricdata.Summary{
+									Count: 12,
+									Snapshot: ocmetricdata.Snapshot{
+										Percentiles: map[float64]float64{
+											0.0:   0.2,
+											50.0:  1.1,
+											100.0: 10.5,
+										},
+									},
+								}),
+							},
+						},
+					},
 				},
 			},
 			expected: []metricdata.Metrics{
@@ -230,6 +356,29 @@ func TestConvertMetrics(t *testing.T) {
 								Sum:          100.0,
 								Bounds:       []float64{1.0, 2.0, 3.0},
 								BucketCounts: []uint64{1, 2, 5},
+								Exemplars: []metricdata.Exemplar[float64]{
+									{
+										Time:    exemplarTime,
+										Value:   0.8,
+										TraceID: []byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+										SpanID:  []byte{2, 0, 0, 0, 0, 0, 0, 0},
+										FilteredAttributes: []attribute.KeyValue{
+											attribute.Bool("bool", true),
+										},
+									},
+									{
+										Time:    exemplarTime,
+										Value:   1.5,
+										TraceID: []byte{3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+										SpanID:  []byte{4, 0, 0, 0, 0, 0, 0, 0},
+									},
+									{
+										Time:    exemplarTime,
+										Value:   2.6,
+										TraceID: []byte{5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+										SpanID:  []byte{6, 0, 0, 0, 0, 0, 0, 0},
+									},
+								},
 							}, {
 								Attributes: attribute.NewSet(attribute.KeyValue{
 									Key:   attribute.Key("a"),
@@ -244,6 +393,26 @@ func TestConvertMetrics(t *testing.T) {
 								Sum:          110.0,
 								Bounds:       []float64{1.0, 2.0, 3.0},
 								BucketCounts: []uint64{1, 4, 5},
+								Exemplars: []metricdata.Exemplar[float64]{
+									{
+										Time:    exemplarTime,
+										Value:   0.9,
+										TraceID: []byte{7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+										SpanID:  []byte{8, 0, 0, 0, 0, 0, 0, 0},
+									},
+									{
+										Time:    exemplarTime,
+										Value:   1.1,
+										TraceID: []byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+										SpanID:  []byte{10, 0, 0, 0, 0, 0, 0, 0},
+									},
+									{
+										Time:    exemplarTime,
+										Value:   2.7,
+										TraceID: []byte{11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+										SpanID:  []byte{12, 0, 0, 0, 0, 0, 0, 0},
+									},
+								},
 							},
 						},
 						Temporality: metricdata.CumulativeTemporality,
@@ -368,9 +537,68 @@ func TestConvertMetrics(t *testing.T) {
 							},
 						},
 					},
+				}, {
+					Name:        "foo.com/summary-a",
+					Description: "a testing summary",
+					Unit:        "ms",
+					Data: metricdata.Summary{
+						DataPoints: []metricdata.SummaryDataPoint{
+							{
+								Attributes: attribute.NewSet(attribute.KeyValue{
+									Key:   attribute.Key("g"),
+									Value: attribute.StringValue("ding"),
+								}, attribute.KeyValue{
+									Key:   attribute.Key("h"),
+									Value: attribute.StringValue("dong"),
+								}),
+								Time:  endTime1,
+								Count: 10,
+								Sum:   13.2,
+								QuantileValues: []metricdata.QuantileValue{
+									{
+										Quantile: 0.0,
+										Value:    0.1,
+									},
+									{
+										Quantile: 0.5,
+										Value:    1.0,
+									},
+									{
+										Quantile: 1.0,
+										Value:    10.4,
+									},
+								},
+							}, {
+								Attributes: attribute.NewSet(attribute.KeyValue{
+									Key:   attribute.Key("g"),
+									Value: attribute.StringValue("ding"),
+								}, attribute.KeyValue{
+									Key:   attribute.Key("h"),
+									Value: attribute.StringValue("dong"),
+								}),
+								Time:  endTime2,
+								Count: 12,
+								QuantileValues: []metricdata.QuantileValue{
+									{
+										Quantile: 0.0,
+										Value:    0.2,
+									},
+									{
+										Quantile: 0.5,
+										Value:    1.1,
+									},
+									{
+										Quantile: 1.0,
+										Value:    10.5,
+									},
+								},
+							},
+						},
+					},
 				},
 			},
-		}, {
+		},
+		{
 			desc: "histogram without data points",
 			input: []*ocmetricdata.Metric{
 				{
@@ -393,7 +621,8 @@ func TestConvertMetrics(t *testing.T) {
 					},
 				},
 			},
-		}, {
+		},
+		{
 			desc: "sum without data points",
 			input: []*ocmetricdata.Metric{
 				{
@@ -417,7 +646,8 @@ func TestConvertMetrics(t *testing.T) {
 					},
 				},
 			},
-		}, {
+		},
+		{
 			desc: "gauge without data points",
 			input: []*ocmetricdata.Metric{
 				{
@@ -439,7 +669,8 @@ func TestConvertMetrics(t *testing.T) {
 					},
 				},
 			},
-		}, {
+		},
+		{
 			desc: "histogram with negative count",
 			input: []*ocmetricdata.Metric{
 				{
@@ -461,8 +692,9 @@ func TestConvertMetrics(t *testing.T) {
 					},
 				},
 			},
-			expectedErr: errConversion,
-		}, {
+			expectedErr: errNegativeCount,
+		},
+		{
 			desc: "histogram with negative bucket count",
 			input: []*ocmetricdata.Metric{
 				{
@@ -488,8 +720,9 @@ func TestConvertMetrics(t *testing.T) {
 					},
 				},
 			},
-			expectedErr: errConversion,
-		}, {
+			expectedErr: errNegativeBucketCount,
+		},
+		{
 			desc: "histogram with non-histogram datapoint type",
 			input: []*ocmetricdata.Metric{
 				{
@@ -509,8 +742,125 @@ func TestConvertMetrics(t *testing.T) {
 					},
 				},
 			},
-			expectedErr: errConversion,
-		}, {
+			expectedErr: errMismatchedValueTypes,
+		},
+		{
+			desc: "summary with mismatched attributes",
+			input: []*ocmetricdata.Metric{
+				{
+					Descriptor: ocmetricdata.Descriptor{
+						Name:        "foo.com/summary-mismatched",
+						Description: "a mismatched summary",
+						Unit:        ocmetricdata.UnitMilliseconds,
+						Type:        ocmetricdata.TypeSummary,
+						LabelKeys: []ocmetricdata.LabelKey{
+							{Key: "g"},
+						},
+					},
+					TimeSeries: []*ocmetricdata.TimeSeries{
+						{
+							LabelValues: []ocmetricdata.LabelValue{
+								{
+									Value:   "ding",
+									Present: true,
+								}, {
+									Value:   "dong",
+									Present: true,
+								},
+							},
+							Points: []ocmetricdata.Point{
+								ocmetricdata.NewSummaryPoint(endTime1, &ocmetricdata.Summary{
+									Count:          10,
+									Sum:            13.2,
+									HasCountAndSum: true,
+									Snapshot: ocmetricdata.Snapshot{
+										Percentiles: map[float64]float64{
+											0.0: 0.1,
+											0.5: 1.0,
+											1.0: 10.4,
+										},
+									},
+								}),
+							},
+						},
+					},
+				},
+			},
+			expectedErr: errMismatchedAttributeKeyValues,
+		},
+		{
+			desc: "summary with negative count",
+			input: []*ocmetricdata.Metric{
+				{
+					Descriptor: ocmetricdata.Descriptor{
+						Name:        "foo.com/summary-negative",
+						Description: "a negative count summary",
+						Unit:        ocmetricdata.UnitMilliseconds,
+						Type:        ocmetricdata.TypeSummary,
+					},
+					TimeSeries: []*ocmetricdata.TimeSeries{
+						{
+							Points: []ocmetricdata.Point{
+								ocmetricdata.NewSummaryPoint(endTime1, &ocmetricdata.Summary{
+									Count:          -10,
+									Sum:            13.2,
+									HasCountAndSum: true,
+									Snapshot: ocmetricdata.Snapshot{
+										Percentiles: map[float64]float64{
+											0.0: 0.1,
+											0.5: 1.0,
+											1.0: 10.4,
+										},
+									},
+								}),
+							},
+						},
+					},
+				},
+			},
+			expectedErr: errNegativeCount,
+		},
+		{
+			desc: "histogram with invalid span context exemplar",
+			input: []*ocmetricdata.Metric{
+				{
+					Descriptor: ocmetricdata.Descriptor{
+						Name:        "foo.com/histogram-a",
+						Description: "a testing histogram",
+						Unit:        ocmetricdata.UnitDimensionless,
+						Type:        ocmetricdata.TypeCumulativeDistribution,
+					},
+					TimeSeries: []*ocmetricdata.TimeSeries{
+						{
+							Points: []ocmetricdata.Point{
+								ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{
+									Count: 8,
+									Sum:   100.0,
+									BucketOptions: &ocmetricdata.BucketOptions{
+										Bounds: []float64{1.0, 2.0, 3.0},
+									},
+									Buckets: []ocmetricdata.Bucket{
+										{
+											Count: 1,
+											Exemplar: &ocmetricdata.Exemplar{
+												Value:     0.8,
+												Timestamp: exemplarTime,
+												Attachments: map[string]interface{}{
+													ocmetricdata.AttachmentKeySpanContext: "notaspancontext",
+												},
+											},
+										},
+									},
+								}),
+							},
+							StartTime: startTime,
+						},
+					},
+				},
+			},
+			expectedErr: errInvalidExemplarSpanContext,
+		},
+		{
 			desc: "sum with non-sum datapoint type",
 			input: []*ocmetricdata.Metric{
 				{
@@ -530,8 +880,9 @@ func TestConvertMetrics(t *testing.T) {
 					},
 				},
 			},
-			expectedErr: errConversion,
-		}, {
+			expectedErr: errMismatchedValueTypes,
+		},
+		{
 			desc: "gauge with non-gauge datapoint type",
 			input: []*ocmetricdata.Metric{
 				{
@@ -551,8 +902,31 @@ func TestConvertMetrics(t *testing.T) {
 					},
 				},
 			},
-			expectedErr: errConversion,
-		}, {
+			expectedErr: errMismatchedValueTypes,
+		},
+		{
+			desc: "summary with non-summary datapoint type",
+			input: []*ocmetricdata.Metric{
+				{
+					Descriptor: ocmetricdata.Descriptor{
+						Name:        "foo.com/bad-point",
+						Description: "a bad type",
+						Unit:        ocmetricdata.UnitDimensionless,
+						Type:        ocmetricdata.TypeSummary,
+					},
+					TimeSeries: []*ocmetricdata.TimeSeries{
+						{
+							Points: []ocmetricdata.Point{
+								ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{}),
+							},
+							StartTime: startTime,
+						},
+					},
+				},
+			},
+			expectedErr: errMismatchedValueTypes,
+		},
+		{
 			desc: "unsupported Gauge Distribution type",
 			input: []*ocmetricdata.Metric{
 				{
@@ -564,13 +938,13 @@ func TestConvertMetrics(t *testing.T) {
 					},
 				},
 			},
-			expectedErr: errConversion,
+			expectedErr: errAggregationType,
 		},
 	} {
 		t.Run(tc.desc, func(t *testing.T) {
 			output, err := ConvertMetrics(tc.input)
 			if !errors.Is(err, tc.expectedErr) {
-				t.Errorf("convertAggregation(%+v) = err(%v), want err(%v)", tc.input, err, tc.expectedErr)
+				t.Errorf("ConvertMetrics(%+v) = err(%v), want err(%v)", tc.input, err, tc.expectedErr)
 			}
 			metricdatatest.AssertEqual[metricdata.ScopeMetrics](t,
 				metricdata.ScopeMetrics{Metrics: tc.expected},
@@ -632,3 +1006,176 @@ func TestConvertAttributes(t *testing.T) {
 		})
 	}
 }
+
+type fakeStringer string
+
+func (f fakeStringer) String() string {
+	return string(f)
+}
+
+func TestConvertKV(t *testing.T) {
+	key := "foo"
+	for _, tt := range []struct {
+		value    any
+		expected attribute.Value
+	}{
+		{
+			value:    bool(true),
+			expected: attribute.BoolValue(true),
+		},
+		{
+			value:    []bool{true, false},
+			expected: attribute.BoolSliceValue([]bool{true, false}),
+		},
+		{
+			value:    int(10),
+			expected: attribute.IntValue(10),
+		},
+		{
+			value:    []int{10, 20},
+			expected: attribute.IntSliceValue([]int{10, 20}),
+		},
+		{
+			value:    int8(10),
+			expected: attribute.IntValue(10),
+		},
+		{
+			value:    []int8{10, 20},
+			expected: attribute.IntSliceValue([]int{10, 20}),
+		},
+		{
+			value:    int16(10),
+			expected: attribute.IntValue(10),
+		},
+		{
+			value:    []int16{10, 20},
+			expected: attribute.IntSliceValue([]int{10, 20}),
+		},
+		{
+			value:    int32(10),
+			expected: attribute.IntValue(10),
+		},
+		{
+			value:    []int32{10, 20},
+			expected: attribute.IntSliceValue([]int{10, 20}),
+		},
+		{
+			value:    int64(10),
+			expected: attribute.Int64Value(10),
+		},
+		{
+			value:    []int64{10, 20},
+			expected: attribute.Int64SliceValue([]int64{10, 20}),
+		},
+		{
+			value:    uint(10),
+			expected: attribute.IntValue(10),
+		},
+		{
+			value:    uint(math.MaxUint),
+			expected: attribute.StringValue(fmt.Sprintf("%v", uint(math.MaxUint))),
+		},
+		{
+			value:    []uint{10, 20},
+			expected: attribute.StringSliceValue([]string{"10", "20"}),
+		},
+		{
+			value:    uint8(10),
+			expected: attribute.IntValue(10),
+		},
+		{
+			value:    []uint8{10, 20},
+			expected: attribute.StringSliceValue([]string{"10", "20"}),
+		},
+		{
+			value:    uint16(10),
+			expected: attribute.IntValue(10),
+		},
+		{
+			value:    []uint16{10, 20},
+			expected: attribute.StringSliceValue([]string{"10", "20"}),
+		},
+		{
+			value:    uint32(10),
+			expected: attribute.IntValue(10),
+		},
+		{
+			value:    []uint32{10, 20},
+			expected: attribute.StringSliceValue([]string{"10", "20"}),
+		},
+		{
+			value:    uint64(10),
+			expected: attribute.Int64Value(10),
+		},
+		{
+			value:    uint64(math.MaxUint64),
+			expected: attribute.StringValue("18446744073709551615"),
+		},
+		{
+			value:    []uint64{10, 20},
+			expected: attribute.StringSliceValue([]string{"10", "20"}),
+		},
+		{
+			value:    uintptr(10),
+			expected: attribute.Int64Value(10),
+		},
+		{
+			value:    []uintptr{10, 20},
+			expected: attribute.StringSliceValue([]string{"10", "20"}),
+		},
+		{
+			value:    float32(10),
+			expected: attribute.Float64Value(10),
+		},
+		{
+			value:    []float32{10, 20},
+			expected: attribute.Float64SliceValue([]float64{10, 20}),
+		},
+		{
+			value:    float64(10),
+			expected: attribute.Float64Value(10),
+		},
+		{
+			value:    []float64{10, 20},
+			expected: attribute.Float64SliceValue([]float64{10, 20}),
+		},
+		{
+			value:    complex64(10),
+			expected: attribute.StringValue("(10+0i)"),
+		},
+		{
+			value:    []complex64{10, 20},
+			expected: attribute.StringSliceValue([]string{"(10+0i)", "(20+0i)"}),
+		},
+		{
+			value:    complex128(10),
+			expected: attribute.StringValue("(10+0i)"),
+		},
+		{
+			value:    []complex128{10, 20},
+			expected: attribute.StringSliceValue([]string{"(10+0i)", "(20+0i)"}),
+		},
+		{
+			value:    "string",
+			expected: attribute.StringValue("string"),
+		},
+		{
+			value:    []string{"string", "slice"},
+			expected: attribute.StringSliceValue([]string{"string", "slice"}),
+		},
+		{
+			value:    fakeStringer("stringer"),
+			expected: attribute.StringValue("stringer"),
+		},
+		{
+			value:    metricdata.Histogram[float64]{},
+			expected: attribute.StringValue("unhandled attribute value: {DataPoints:[] Temporality:undefinedTemporality}"),
+		},
+	} {
+		t.Run(fmt.Sprintf("%v(%+v)", reflect.TypeOf(tt.value), tt.value), func(t *testing.T) {
+			got := convertKV(key, tt.value)
+			assert.Equal(t, key, string(got.Key))
+			assert.Equal(t, tt.expected, got.Value)
+		})
+	}
+}
diff --git a/bridge/opencensus/internal/tracer_test.go b/bridge/opencensus/internal/tracer_test.go
index 1e3518a7efe..618a7b81567 100644
--- a/bridge/opencensus/internal/tracer_test.go
+++ b/bridge/opencensus/internal/tracer_test.go
@@ -24,6 +24,8 @@ import (
 	"go.opentelemetry.io/otel/bridge/opencensus/internal/oc2otel"
 	"go.opentelemetry.io/otel/bridge/opencensus/internal/otel2oc"
 	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+	"go.opentelemetry.io/otel/trace/noop"
 )
 
 type handler struct{ err error }
@@ -38,6 +40,8 @@ func withHandler() (*handler, func()) {
 }
 
 type tracer struct {
+	embedded.Tracer
+
 	ctx  context.Context
 	name string
 	opts []trace.SpanStartOption
@@ -45,8 +49,8 @@ type tracer struct {
 
 func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
 	t.ctx, t.name, t.opts = ctx, name, opts
-	noop := trace.NewNoopTracerProvider().Tracer("testing")
-	return noop.Start(ctx, name, opts...)
+	sub := noop.NewTracerProvider().Tracer("testing")
+	return sub.Start(ctx, name, opts...)
 }
 
 type ctxKey string
@@ -110,11 +114,11 @@ func TestTracerFromContext(t *testing.T) {
 	})
 	ctx := trace.ContextWithSpanContext(context.Background(), sc)
 
-	noop := trace.NewNoopTracerProvider().Tracer("TestTracerFromContext")
+	tracer := noop.NewTracerProvider().Tracer("TestTracerFromContext")
 	// Test using the fact that the No-Op span will propagate a span context .
-	ctx, _ = noop.Start(ctx, "test")
+	ctx, _ = tracer.Start(ctx, "test")
 
-	got := internal.NewTracer(noop).FromContext(ctx).SpanContext()
+	got := internal.NewTracer(tracer).FromContext(ctx).SpanContext()
 	// Do not test the convedsion, only that the propagtion.
 	want := otel2oc.SpanContext(sc)
 	if got != want {
@@ -129,11 +133,11 @@ func TestTracerNewContext(t *testing.T) {
 	})
 	ctx := trace.ContextWithSpanContext(context.Background(), sc)
 
-	noop := trace.NewNoopTracerProvider().Tracer("TestTracerNewContext")
+	tracer := noop.NewTracerProvider().Tracer("TestTracerNewContext")
 	// Test using the fact that the No-Op span will propagate a span context .
-	_, s := noop.Start(ctx, "test")
+	_, s := tracer.Start(ctx, "test")
 
-	ocTracer := internal.NewTracer(noop)
+	ocTracer := internal.NewTracer(tracer)
 	ctx = ocTracer.NewContext(context.Background(), internal.NewSpan(s))
 	got := trace.SpanContextFromContext(ctx)
 
diff --git a/bridge/opencensus/metric.go b/bridge/opencensus/metric.go
index 870faa23670..888e82f5ff3 100644
--- a/bridge/opencensus/metric.go
+++ b/bridge/opencensus/metric.go
@@ -18,32 +18,33 @@ import (
 	"context"
 
 	ocmetricdata "go.opencensus.io/metric/metricdata"
-	"go.opencensus.io/metric/metricexport"
 	"go.opencensus.io/metric/metricproducer"
 
-	"go.opentelemetry.io/otel"
 	internal "go.opentelemetry.io/otel/bridge/opencensus/internal/ocmetric"
 	"go.opentelemetry.io/otel/sdk/instrumentation"
 	"go.opentelemetry.io/otel/sdk/metric"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
-	"go.opentelemetry.io/otel/sdk/resource"
 )
 
-const scopeName = "go.opentelemetry.io/otel/bridge/opencensus"
-
-type producer struct {
+// MetricProducer implements the [go.opentelemetry.io/otel/sdk/metric.Producer] to provide metrics
+// from OpenCensus to the OpenTelemetry SDK.
+type MetricProducer struct {
 	manager *metricproducer.Manager
 }
 
 // NewMetricProducer returns a metric.Producer that fetches metrics from
 // OpenCensus.
-func NewMetricProducer() metric.Producer {
-	return &producer{
+func NewMetricProducer(opts ...MetricOption) *MetricProducer {
+	return &MetricProducer{
 		manager: metricproducer.GlobalManager(),
 	}
 }
 
-func (p *producer) Produce(context.Context) ([]metricdata.ScopeMetrics, error) {
+var _ metric.Producer = (*MetricProducer)(nil)
+
+// Produce fetches metrics from the OpenCensus manager,
+// translates them to OpenTelemetry's data model, and returns them.
+func (p *MetricProducer) Produce(context.Context) ([]metricdata.ScopeMetrics, error) {
 	producers := p.manager.GetAll()
 	data := []*ocmetricdata.Metric{}
 	for _, ocProducer := range producers {
@@ -55,44 +56,9 @@ func (p *producer) Produce(context.Context) ([]metricdata.ScopeMetrics, error) {
 	}
 	return []metricdata.ScopeMetrics{{
 		Scope: instrumentation.Scope{
-			Name: scopeName,
+			Name:    scopeName,
+			Version: Version(),
 		},
 		Metrics: otelmetrics,
 	}}, err
 }
-
-// exporter implements the OpenCensus metric Exporter interface using an
-// OpenTelemetry base exporter.
-type exporter struct {
-	base metric.Exporter
-	res  *resource.Resource
-}
-
-// NewMetricExporter returns an OpenCensus exporter that exports to an
-// OpenTelemetry (push) exporter.
-// Deprecated: Use NewMetricProducer instead.
-func NewMetricExporter(base metric.Exporter, res *resource.Resource) metricexport.Exporter {
-	return &exporter{base: base, res: res}
-}
-
-// ExportMetrics implements the OpenCensus metric Exporter interface by sending
-// to an OpenTelemetry exporter.
-func (e *exporter) ExportMetrics(ctx context.Context, ocmetrics []*ocmetricdata.Metric) error {
-	otelmetrics, err := internal.ConvertMetrics(ocmetrics)
-	if err != nil {
-		otel.Handle(err)
-	}
-	if len(otelmetrics) == 0 {
-		return nil
-	}
-	return e.base.Export(ctx, &metricdata.ResourceMetrics{
-		Resource: e.res,
-		ScopeMetrics: []metricdata.ScopeMetrics{
-			{
-				Scope: instrumentation.Scope{
-					Name: scopeName,
-				},
-				Metrics: otelmetrics,
-			},
-		}})
-}
diff --git a/bridge/opencensus/metric_test.go b/bridge/opencensus/metric_test.go
index 58c11aadc0a..901953c3a81 100644
--- a/bridge/opencensus/metric_test.go
+++ b/bridge/opencensus/metric_test.go
@@ -16,7 +16,6 @@ package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus"
 
 import (
 	"context"
-	"fmt"
 	"testing"
 	"time"
 
@@ -27,10 +26,8 @@ import (
 
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/sdk/instrumentation"
-	"go.opentelemetry.io/otel/sdk/metric"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest"
-	"go.opentelemetry.io/otel/sdk/resource"
 )
 
 func TestMetricProducer(t *testing.T) {
@@ -67,7 +64,8 @@ func TestMetricProducer(t *testing.T) {
 			},
 			expected: []metricdata.ScopeMetrics{{
 				Scope: instrumentation.Scope{
-					Name: scopeName,
+					Name:    scopeName,
+					Version: Version(),
 				},
 				Metrics: []metricdata.Metrics{
 					{
@@ -115,7 +113,8 @@ func TestMetricProducer(t *testing.T) {
 			},
 			expected: []metricdata.ScopeMetrics{{
 				Scope: instrumentation.Scope{
-					Name: scopeName,
+					Name:    scopeName,
+					Version: Version(),
 				},
 				Metrics: []metricdata.Metrics{
 					{
@@ -160,127 +159,3 @@ type fakeOCProducer struct {
 func (f *fakeOCProducer) Read() []*ocmetricdata.Metric {
 	return f.metrics
 }
-
-func TestPushMetricsExporter(t *testing.T) {
-	now := time.Now()
-	for _, tc := range []struct {
-		desc          string
-		input         []*ocmetricdata.Metric
-		inputResource *resource.Resource
-		exportErr     error
-		expected      *metricdata.ResourceMetrics
-		expectErr     bool
-	}{
-		{
-			desc: "empty batch isn't sent",
-		},
-		{
-			desc:      "export error",
-			exportErr: fmt.Errorf("failed to export"),
-			input: []*ocmetricdata.Metric{
-				{
-					Resource: &ocresource.Resource{
-						Labels: map[string]string{
-							"R1": "V1",
-							"R2": "V2",
-						},
-					},
-					TimeSeries: []*ocmetricdata.TimeSeries{
-						{
-							StartTime: now,
-							Points: []ocmetricdata.Point{
-								{Value: int64(123), Time: now},
-							},
-						},
-					},
-				},
-			},
-			expectErr: true,
-		},
-		{
-			desc: "success",
-			input: []*ocmetricdata.Metric{
-				{
-					Resource: &ocresource.Resource{
-						Labels: map[string]string{
-							"R1": "V1",
-							"R2": "V2",
-						},
-					},
-					TimeSeries: []*ocmetricdata.TimeSeries{
-						{
-							StartTime: now,
-							Points: []ocmetricdata.Point{
-								{Value: int64(123), Time: now},
-							},
-						},
-					},
-				},
-			},
-			inputResource: resource.NewSchemaless(
-				attribute.String("R1", "V1"),
-				attribute.String("R2", "V2"),
-			),
-			expected: &metricdata.ResourceMetrics{
-				Resource: resource.NewSchemaless(
-					attribute.String("R1", "V1"),
-					attribute.String("R2", "V2"),
-				),
-				ScopeMetrics: []metricdata.ScopeMetrics{
-					{
-						Scope: instrumentation.Scope{
-							Name: scopeName,
-						},
-						Metrics: []metricdata.Metrics{
-							{
-								Name:        "",
-								Description: "",
-								Unit:        "",
-								Data: metricdata.Gauge[int64]{
-									DataPoints: []metricdata.DataPoint[int64]{
-										{
-											Attributes: attribute.NewSet(),
-											StartTime:  now,
-											Time:       now,
-											Value:      123,
-										},
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-		},
-	} {
-		t.Run(tc.desc, func(t *testing.T) {
-			fake := &fakeExporter{err: tc.exportErr}
-			exporter := NewMetricExporter(fake, tc.inputResource)
-			err := exporter.ExportMetrics(context.Background(), tc.input)
-			if tc.expectErr {
-				require.Error(t, err)
-			} else {
-				require.NoError(t, err)
-			}
-			if tc.expected != nil {
-				require.NotNil(t, fake.data)
-				metricdatatest.AssertEqual(t, *tc.expected, *fake.data)
-			} else {
-				require.Nil(t, fake.data)
-			}
-		})
-	}
-}
-
-type fakeExporter struct {
-	metric.Exporter
-	data *metricdata.ResourceMetrics
-	err  error
-}
-
-func (f *fakeExporter) Export(ctx context.Context, data *metricdata.ResourceMetrics) error {
-	if f.err == nil {
-		f.data = data
-	}
-	return f.err
-}
diff --git a/bridge/opencensus/test/bridge_test.go b/bridge/opencensus/test/bridge_test.go
index 5a278be0dfe..98041025c60 100644
--- a/bridge/opencensus/test/bridge_test.go
+++ b/bridge/opencensus/test/bridge_test.go
@@ -33,7 +33,7 @@ func TestMixedAPIs(t *testing.T) {
 	sr := tracetest.NewSpanRecorder()
 	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))
 	tracer := tp.Tracer("mixedapitracer")
-	octrace.DefaultTracer = ocbridge.NewTracer(tracer)
+	ocbridge.InstallTraceBridge(ocbridge.WithTracerProvider(tp))
 
 	func() {
 		ctx := context.Background()
@@ -77,7 +77,7 @@ func TestMixedAPIs(t *testing.T) {
 func TestStartOptions(t *testing.T) {
 	sr := tracetest.NewSpanRecorder()
 	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))
-	octrace.DefaultTracer = ocbridge.NewTracer(tp.Tracer("startoptionstracer"))
+	ocbridge.InstallTraceBridge(ocbridge.WithTracerProvider(tp))
 
 	ctx := context.Background()
 	_, span := octrace.StartSpan(ctx, "OpenCensusSpan", octrace.WithSpanKind(octrace.SpanKindClient))
@@ -97,8 +97,8 @@ func TestStartOptions(t *testing.T) {
 func TestStartSpanWithRemoteParent(t *testing.T) {
 	sr := tracetest.NewSpanRecorder()
 	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))
+	ocbridge.InstallTraceBridge(ocbridge.WithTracerProvider(tp))
 	tracer := tp.Tracer("remoteparent")
-	octrace.DefaultTracer = ocbridge.NewTracer(tracer)
 
 	ctx := context.Background()
 	ctx, parent := tracer.Start(ctx, "OpenTelemetrySpan1")
@@ -120,8 +120,8 @@ func TestStartSpanWithRemoteParent(t *testing.T) {
 func TestToFromContext(t *testing.T) {
 	sr := tracetest.NewSpanRecorder()
 	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))
+	ocbridge.InstallTraceBridge(ocbridge.WithTracerProvider(tp))
 	tracer := tp.Tracer("tofromcontext")
-	octrace.DefaultTracer = ocbridge.NewTracer(tracer)
 
 	func() {
 		ctx := context.Background()
@@ -159,7 +159,7 @@ func TestToFromContext(t *testing.T) {
 func TestIsRecordingEvents(t *testing.T) {
 	sr := tracetest.NewSpanRecorder()
 	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))
-	octrace.DefaultTracer = ocbridge.NewTracer(tp.Tracer("isrecordingevents"))
+	ocbridge.InstallTraceBridge(ocbridge.WithTracerProvider(tp))
 
 	ctx := context.Background()
 	_, ocspan := octrace.StartSpan(ctx, "OpenCensusSpan1")
@@ -179,7 +179,7 @@ func attrsMap(s []attribute.KeyValue) map[attribute.Key]attribute.Value {
 func TestSetThings(t *testing.T) {
 	sr := tracetest.NewSpanRecorder()
 	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr))
-	octrace.DefaultTracer = ocbridge.NewTracer(tp.Tracer("setthings"))
+	ocbridge.InstallTraceBridge(ocbridge.WithTracerProvider(tp))
 
 	ctx := context.Background()
 	_, ocspan := octrace.StartSpan(ctx, "OpenCensusSpan1")
diff --git a/bridge/opencensus/test/go.mod b/bridge/opencensus/test/go.mod
index 1290cee67be..9fe2e306450 100644
--- a/bridge/opencensus/test/go.mod
+++ b/bridge/opencensus/test/go.mod
@@ -1,22 +1,22 @@
 module go.opentelemetry.io/otel/bridge/opencensus/test
 
-go 1.19
+go 1.20
 
 require (
 	go.opencensus.io v0.24.0
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/bridge/opencensus v0.39.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/trace v1.16.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/bridge/opencensus v0.44.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/trace v1.21.0
 )
 
 require (
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	go.opentelemetry.io/otel/sdk/metric v0.39.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	go.opentelemetry.io/otel/sdk/metric v1.21.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
 )
 
 replace go.opentelemetry.io/otel => ../../..
diff --git a/bridge/opencensus/test/go.sum b/bridge/opencensus/test/go.sum
index 8661ceb6298..6e76c6aed70 100644
--- a/bridge/opencensus/test/go.sum
+++ b/bridge/opencensus/test/go.sum
@@ -11,8 +11,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -34,7 +34,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -68,8 +68,8 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/bridge/opencensus/bridge.go b/bridge/opencensus/trace.go
similarity index 73%
rename from bridge/opencensus/bridge.go
rename to bridge/opencensus/trace.go
index 7e6f31202d6..92ec6d6961a 100644
--- a/bridge/opencensus/bridge.go
+++ b/bridge/opencensus/trace.go
@@ -23,11 +23,18 @@ import (
 	"go.opentelemetry.io/otel/trace"
 )
 
-// NewTracer returns an implementation of the OpenCensus Tracer interface which
-// uses OpenTelemetry APIs.  Using this implementation of Tracer "upgrades"
-// libraries that use OpenCensus to OpenTelemetry to facilitate a migration.
-func NewTracer(tracer trace.Tracer) octrace.Tracer {
-	return internal.NewTracer(tracer)
+// InstallTraceBridge installs the OpenCensus trace bridge, which overwrites
+// the global OpenCensus tracer implementation. Once the bridge is installed,
+// spans recorded using OpenCensus are redirected to the OpenTelemetry SDK.
+func InstallTraceBridge(opts ...TraceOption) {
+	octrace.DefaultTracer = newTraceBridge(opts)
+}
+
+func newTraceBridge(opts []TraceOption) octrace.Tracer {
+	cfg := newTraceConfig(opts)
+	return internal.NewTracer(
+		cfg.tp.Tracer(scopeName, trace.WithInstrumentationVersion(Version())),
+	)
 }
 
 // OTelSpanContextToOC converts from an OpenTelemetry SpanContext to an
diff --git a/bridge/opencensus/trace_test.go b/bridge/opencensus/trace_test.go
new file mode 100644
index 00000000000..529a77070c7
--- /dev/null
+++ b/bridge/opencensus/trace_test.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus"
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/sdk/trace/tracetest"
+)
+
+func TestNewTraceBridge(t *testing.T) {
+	exporter := tracetest.NewInMemoryExporter()
+	tp := trace.NewTracerProvider(trace.WithSyncer(exporter))
+	bridge := newTraceBridge([]TraceOption{WithTracerProvider(tp)})
+	_, span := bridge.StartSpan(context.Background(), "foo")
+	span.End()
+	gotSpans := exporter.GetSpans()
+	require.Len(t, gotSpans, 1)
+	gotSpan := gotSpans[0]
+	assert.Equal(t, gotSpan.InstrumentationLibrary.Name, scopeName)
+	assert.Equal(t, gotSpan.InstrumentationLibrary.Version, Version())
+}
diff --git a/example/view/doc.go b/bridge/opencensus/version.go
similarity index 76%
rename from example/view/doc.go
rename to bridge/opencensus/version.go
index 6307e5f20e3..8fb56844e65 100644
--- a/example/view/doc.go
+++ b/bridge/opencensus/version.go
@@ -12,5 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package main provides a code sample of using metric views to customize instruments.
-package main
+package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus"
+
+// Version is the current release version of the opencensus bridge.
+func Version() string {
+	return "0.44.0"
+}
diff --git a/bridge/opentracing/bridge.go b/bridge/opentracing/bridge.go
index 50ff21a191a..cc3d7d19c7f 100644
--- a/bridge/opentracing/bridge.go
+++ b/bridge/opentracing/bridge.go
@@ -33,10 +33,11 @@ import (
 	iBaggage "go.opentelemetry.io/otel/internal/baggage"
 	"go.opentelemetry.io/otel/propagation"
 	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/noop"
 )
 
 var (
-	noopTracer = trace.NewNoopTracerProvider().Tracer("")
+	noopTracer = noop.NewTracerProvider().Tracer("")
 	noopSpan   = func() trace.Span {
 		_, s := noopTracer.Start(context.Background(), "")
 		return s
@@ -317,8 +318,10 @@ type BridgeTracer struct {
 	propagator propagation.TextMapPropagator
 }
 
-var _ ot.Tracer = &BridgeTracer{}
-var _ ot.TracerContextWithSpanExtension = &BridgeTracer{}
+var (
+	_ ot.Tracer                         = &BridgeTracer{}
+	_ ot.TracerContextWithSpanExtension = &BridgeTracer{}
+)
 
 // NewBridgeTracer creates a new BridgeTracer. The new tracer forwards
 // the calls to the OpenTelemetry Noop tracer, so it should be
@@ -829,15 +832,13 @@ func newTextMapWrapperForInject(carrier interface{}) (*textMapWrapper, error) {
 	return t, nil
 }
 
-type textMapWriter struct {
-}
+type textMapWriter struct{}
 
 func (t *textMapWriter) Set(key string, value string) {
 	// maybe print a warning log.
 }
 
-type textMapReader struct {
-}
+type textMapReader struct{}
 
 func (t *textMapReader) ForeachKey(handler func(key, val string) error) error {
 	return nil // maybe print a warning log.
diff --git a/bridge/opentracing/bridge_test.go b/bridge/opentracing/bridge_test.go
index 47d26916a53..bbeb3f9fbfe 100644
--- a/bridge/opentracing/bridge_test.go
+++ b/bridge/opentracing/bridge_test.go
@@ -35,8 +35,7 @@ import (
 	"go.opentelemetry.io/otel/trace"
 )
 
-type testOnlyTextMapReader struct {
-}
+type testOnlyTextMapReader struct{}
 
 func newTestOnlyTextMapReader() *testOnlyTextMapReader {
 	return &testOnlyTextMapReader{}
@@ -144,8 +143,7 @@ var (
 	spanID     trace.SpanID  = [8]byte{byte(11)}
 )
 
-type testTextMapPropagator struct {
-}
+type testTextMapPropagator struct{}
 
 func (t testTextMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
 	carrier.Set(testHeader, strings.Join([]string{traceID.String(), spanID.String()}, ":"))
@@ -163,7 +161,7 @@ func (t testTextMapPropagator) Extract(ctx context.Context, carrier propagation.
 		return ctx
 	}
 
-	var exist = false
+	exist := false
 
 	for _, key := range carrier.Keys() {
 		if strings.EqualFold(testHeader, key) {
diff --git a/bridge/opentracing/go.mod b/bridge/opentracing/go.mod
index 8772e6f4ac6..c24a004e31e 100644
--- a/bridge/opentracing/go.mod
+++ b/bridge/opentracing/go.mod
@@ -1,6 +1,6 @@
 module go.opentelemetry.io/otel/bridge/opentracing
 
-go 1.19
+go 1.20
 
 replace go.opentelemetry.io/otel => ../..
 
@@ -9,16 +9,16 @@ replace go.opentelemetry.io/otel/trace => ../../trace
 require (
 	github.com/opentracing/opentracing-go v1.2.0
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/trace v1.16.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/trace v1.21.0
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
diff --git a/bridge/opentracing/go.sum b/bridge/opentracing/go.sum
index a075e034330..b79c74aee78 100644
--- a/bridge/opentracing/go.sum
+++ b/bridge/opentracing/go.sum
@@ -2,11 +2,11 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
diff --git a/bridge/opentracing/internal/mock.go b/bridge/opentracing/internal/mock.go
index 64e95604278..d3a8d91a30a 100644
--- a/bridge/opentracing/internal/mock.go
+++ b/bridge/opentracing/internal/mock.go
@@ -26,6 +26,8 @@ import (
 	"go.opentelemetry.io/otel/codes"
 	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
 	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+	"go.opentelemetry.io/otel/trace/noop"
 )
 
 //nolint:revive // ignoring missing comments for unexported global variables in an internal package.
@@ -44,6 +46,8 @@ type MockContextKeyValue struct {
 }
 
 type MockTracer struct {
+	embedded.Tracer
+
 	FinishedSpans         []*MockSpan
 	SpareTraceIDs         []trace.TraceID
 	SpareSpanIDs          []trace.SpanID
@@ -54,8 +58,10 @@ type MockTracer struct {
 	rand     *rand.Rand
 }
 
-var _ trace.Tracer = &MockTracer{}
-var _ migration.DeferredContextSetupTracerExtension = &MockTracer{}
+var (
+	_ trace.Tracer                                  = &MockTracer{}
+	_ migration.DeferredContextSetupTracerExtension = &MockTracer{}
+)
 
 func NewMockTracer() *MockTracer {
 	return &MockTracer{
@@ -182,6 +188,8 @@ type MockEvent struct {
 }
 
 type MockSpan struct {
+	embedded.Span
+
 	mockTracer     *MockTracer
 	officialTracer trace.Tracer
 	spanContext    trace.SpanContext
@@ -195,8 +203,10 @@ type MockSpan struct {
 	Events       []MockEvent
 }
 
-var _ trace.Span = &MockSpan{}
-var _ migration.OverrideTracerSpanExtension = &MockSpan{}
+var (
+	_ trace.Span                            = &MockSpan{}
+	_ migration.OverrideTracerSpanExtension = &MockSpan{}
+)
 
 func (s *MockSpan) SpanContext() trace.SpanContext {
 	return s.spanContext
@@ -291,4 +301,4 @@ func (s *MockSpan) OverrideTracer(tracer trace.Tracer) {
 	s.officialTracer = tracer
 }
 
-func (s *MockSpan) TracerProvider() trace.TracerProvider { return trace.NewNoopTracerProvider() }
+func (s *MockSpan) TracerProvider() trace.TracerProvider { return noop.NewTracerProvider() }
diff --git a/bridge/opentracing/provider.go b/bridge/opentracing/provider.go
index 941e277baf8..90bad0bd516 100644
--- a/bridge/opentracing/provider.go
+++ b/bridge/opentracing/provider.go
@@ -18,11 +18,14 @@ import (
 	"sync"
 
 	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
 )
 
 // TracerProvider is an OpenTelemetry TracerProvider that wraps an OpenTracing
 // Tracer.
 type TracerProvider struct {
+	embedded.TracerProvider
+
 	bridge   *BridgeTracer
 	provider trace.TracerProvider
 
diff --git a/bridge/opentracing/provider_test.go b/bridge/opentracing/provider_test.go
index 8af3796e031..1a4dd5ca5b5 100644
--- a/bridge/opentracing/provider_test.go
+++ b/bridge/opentracing/provider_test.go
@@ -19,6 +19,7 @@ import (
 
 	"go.opentelemetry.io/otel/bridge/opentracing/internal"
 	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
 )
 
 type namedMockTracer struct {
@@ -26,7 +27,7 @@ type namedMockTracer struct {
 	*internal.MockTracer
 }
 
-type namedMockTracerProvider struct{}
+type namedMockTracerProvider struct{ embedded.TracerProvider }
 
 var _ trace.TracerProvider = (*namedMockTracerProvider)(nil)
 
diff --git a/bridge/opentracing/test/bridge_grpc_test.go b/bridge/opentracing/test/bridge_grpc_test.go
index 4843cbfd260..6b2208f6d3c 100644
--- a/bridge/opentracing/test/bridge_grpc_test.go
+++ b/bridge/opentracing/test/bridge_grpc_test.go
@@ -38,12 +38,15 @@ type testGRPCServer struct{}
 func (*testGRPCServer) UnaryCall(ctx context.Context, r *testpb.SimpleRequest) (*testpb.SimpleResponse, error) {
 	return &testpb.SimpleResponse{Payload: r.Payload * 2}, nil
 }
+
 func (*testGRPCServer) StreamingOutputCall(*testpb.SimpleRequest, testpb.TestService_StreamingOutputCallServer) error {
 	return nil
 }
+
 func (*testGRPCServer) StreamingInputCall(testpb.TestService_StreamingInputCallServer) error {
 	return nil
 }
+
 func (*testGRPCServer) StreamingBidirectionalCall(testpb.TestService_StreamingBidirectionalCallServer) error {
 	return nil
 }
diff --git a/bridge/opentracing/test/go.mod b/bridge/opentracing/test/go.mod
index 4d0c5393327..b694aacb1ba 100644
--- a/bridge/opentracing/test/go.mod
+++ b/bridge/opentracing/test/go.mod
@@ -1,6 +1,6 @@
 module go.opentelemetry.io/otel/bridge/opentracing/test
 
-go 1.19
+go 1.20
 
 replace go.opentelemetry.io/otel => ../../..
 
@@ -12,23 +12,23 @@ require (
 	github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e
 	github.com/opentracing/opentracing-go v1.2.0
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/bridge/opentracing v1.16.0
-	google.golang.org/grpc v1.57.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/bridge/opentracing v1.21.0
+	google.golang.org/grpc v1.59.0
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	go.opentelemetry.io/otel/trace v1.16.0 // indirect
-	golang.org/x/net v0.9.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
-	golang.org/x/text v0.9.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	go.opentelemetry.io/otel/trace v1.21.0 // indirect
+	golang.org/x/net v0.17.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
+	golang.org/x/text v0.13.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
diff --git a/bridge/opentracing/test/go.sum b/bridge/opentracing/test/go.sum
index 83ef51deb84..867315ee275 100644
--- a/bridge/opentracing/test/go.sum
+++ b/bridge/opentracing/test/go.sum
@@ -5,8 +5,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -18,7 +18,7 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg=
 github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo=
@@ -36,26 +36,26 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
-golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
 google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
-google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
+google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
diff --git a/bridge/opentracing/wrapper.go b/bridge/opentracing/wrapper.go
index 3e348c45523..1e065e3bc62 100644
--- a/bridge/opentracing/wrapper.go
+++ b/bridge/opentracing/wrapper.go
@@ -19,6 +19,7 @@ import (
 
 	"go.opentelemetry.io/otel/bridge/opentracing/migration"
 	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
 )
 
 // WrapperTracerProvider is an OpenTelemetry TracerProvider that wraps an
@@ -26,6 +27,8 @@ import (
 //
 // Deprecated: Use the TracerProvider from NewTracerProvider(...) instead.
 type WrapperTracerProvider struct {
+	embedded.TracerProvider
+
 	wTracer *WrapperTracer
 }
 
@@ -56,12 +59,16 @@ func NewWrappedTracerProvider(bridge *BridgeTracer, tracer trace.Tracer) *Wrappe
 // aware how to operate in environment where OpenTracing API is also
 // used.
 type WrapperTracer struct {
+	embedded.Tracer
+
 	bridge *BridgeTracer
 	tracer trace.Tracer
 }
 
-var _ trace.Tracer = &WrapperTracer{}
-var _ migration.DeferredContextSetupTracerExtension = &WrapperTracer{}
+var (
+	_ trace.Tracer                                  = &WrapperTracer{}
+	_ migration.DeferredContextSetupTracerExtension = &WrapperTracer{}
+)
 
 // NewWrapperTracer wraps the passed tracer and also talks to the
 // passed bridge tracer when setting up the context with the new
diff --git a/exporters/otlp/otlptrace/internal/otlpconfig/envconfig_test.go b/example/dice/doc.go
similarity index 88%
rename from exporters/otlp/otlptrace/internal/otlpconfig/envconfig_test.go
rename to example/dice/doc.go
index 25021f7328c..5fe156fb977 100644
--- a/exporters/otlp/otlptrace/internal/otlpconfig/envconfig_test.go
+++ b/example/dice/doc.go
@@ -12,4 +12,5 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package otlpconfig
+// Dice is the "Roll the dice" getting started example application.
+package main
diff --git a/example/dice/go.mod b/example/dice/go.mod
new file mode 100644
index 00000000000..4a974d178aa
--- /dev/null
+++ b/example/dice/go.mod
@@ -0,0 +1,35 @@
+module go.opentelemetry.io/otel/example/dice
+
+go 1.20
+
+require (
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0
+	go.opentelemetry.io/otel/metric v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/sdk/metric v1.21.0
+)
+
+require (
+	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	go.opentelemetry.io/otel/trace v1.21.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
+)
+
+replace go.opentelemetry.io/otel/exporters/stdout/stdouttrace => ../../exporters/stdout/stdouttrace
+
+replace go.opentelemetry.io/otel/exporters/stdout/stdoutmetric => ../../exporters/stdout/stdoutmetric
+
+replace go.opentelemetry.io/otel => ../..
+
+replace go.opentelemetry.io/otel/trace => ../../trace
+
+replace go.opentelemetry.io/otel/metric => ../../metric
+
+replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric
+
+replace go.opentelemetry.io/otel/sdk => ../../sdk
diff --git a/example/dice/go.sum b/example/dice/go.sum
new file mode 100644
index 00000000000..33f7fb86e66
--- /dev/null
+++ b/example/dice/go.sum
@@ -0,0 +1,16 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/example/dice/main.go b/example/dice/main.go
new file mode 100644
index 00000000000..f7e0242906f
--- /dev/null
+++ b/example/dice/main.go
@@ -0,0 +1,99 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"context"
+	"errors"
+	"log"
+	"net"
+	"net/http"
+	"os"
+	"os/signal"
+	"time"
+
+	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+)
+
+func main() {
+	if err := run(); err != nil {
+		log.Fatalln(err)
+	}
+}
+
+func run() (err error) {
+	// Handle SIGINT (CTRL+C) gracefully.
+	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
+	defer stop()
+
+	// Set up OpenTelemetry.
+	serviceName := "dice"
+	serviceVersion := "0.1.0"
+	otelShutdown, err := setupOTelSDK(ctx, serviceName, serviceVersion)
+	if err != nil {
+		return
+	}
+	// Handle shutdown properly so nothing leaks.
+	defer func() {
+		err = errors.Join(err, otelShutdown(context.Background()))
+	}()
+
+	// Start HTTP server.
+	srv := &http.Server{
+		Addr:         ":8080",
+		BaseContext:  func(_ net.Listener) context.Context { return ctx },
+		ReadTimeout:  time.Second,
+		WriteTimeout: 10 * time.Second,
+		Handler:      newHTTPHandler(),
+	}
+	srvErr := make(chan error, 1)
+	go func() {
+		srvErr <- srv.ListenAndServe()
+	}()
+
+	// Wait for interruption.
+	select {
+	case err = <-srvErr:
+		// Error when starting HTTP server.
+		return
+	case <-ctx.Done():
+		// Wait for first CTRL+C.
+		// Stop receiving signal notifications as soon as possible.
+		stop()
+	}
+
+	// When Shutdown is called, ListenAndServe immediately returns ErrServerClosed.
+	err = srv.Shutdown(context.Background())
+	return
+}
+
+func newHTTPHandler() http.Handler {
+	mux := http.NewServeMux()
+
+	// handleFunc is a replacement for mux.HandleFunc
+	// which enriches the handler's HTTP instrumentation with the pattern as the http.route.
+	handleFunc := func(pattern string, handlerFunc func(http.ResponseWriter, *http.Request)) {
+		// Configure the "http.route" for the HTTP instrumentation.
+		handler := otelhttp.WithRouteTag(pattern, http.HandlerFunc(handlerFunc))
+		mux.Handle(pattern, handler)
+	}
+
+	// Register handlers.
+	handleFunc("/rolldice", rolldice)
+
+	// Add HTTP instrumentation for the whole server.
+	handler := otelhttp.NewHandler(mux, "/")
+	return handler
+}
diff --git a/example/dice/otel.go b/example/dice/otel.go
new file mode 100644
index 00000000000..9c4f9fe8c42
--- /dev/null
+++ b/example/dice/otel.go
@@ -0,0 +1,130 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
+	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+	"go.opentelemetry.io/otel/propagation"
+	"go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/sdk/trace"
+	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+)
+
+// setupOTelSDK bootstraps the OpenTelemetry pipeline.
+// If it does not return an error, make sure to call shutdown for proper cleanup.
+func setupOTelSDK(ctx context.Context, serviceName, serviceVersion string) (shutdown func(context.Context) error, err error) {
+	var shutdownFuncs []func(context.Context) error
+
+	// shutdown calls cleanup functions registered via shutdownFuncs.
+	// The errors from the calls are joined.
+	// Each registered cleanup will be invoked once.
+	shutdown = func(ctx context.Context) error {
+		var err error
+		for _, fn := range shutdownFuncs {
+			err = errors.Join(err, fn(ctx))
+		}
+		shutdownFuncs = nil
+		return err
+	}
+
+	// handleErr calls shutdown for cleanup and makes sure that all errors are returned.
+	handleErr := func(inErr error) {
+		err = errors.Join(inErr, shutdown(ctx))
+	}
+
+	// Set up resource.
+	res, err := newResource(serviceName, serviceVersion)
+	if err != nil {
+		handleErr(err)
+		return
+	}
+
+	// Set up propagator.
+	prop := newPropagator()
+	otel.SetTextMapPropagator(prop)
+
+	// Set up trace provider.
+	tracerProvider, err := newTraceProvider(res)
+	if err != nil {
+		handleErr(err)
+		return
+	}
+	shutdownFuncs = append(shutdownFuncs, tracerProvider.Shutdown)
+	otel.SetTracerProvider(tracerProvider)
+
+	// Set up meter provider.
+	meterProvider, err := newMeterProvider(res)
+	if err != nil {
+		handleErr(err)
+		return
+	}
+	shutdownFuncs = append(shutdownFuncs, meterProvider.Shutdown)
+	otel.SetMeterProvider(meterProvider)
+
+	return
+}
+
+func newResource(serviceName, serviceVersion string) (*resource.Resource, error) {
+	return resource.Merge(resource.Default(),
+		resource.NewWithAttributes(semconv.SchemaURL,
+			semconv.ServiceName(serviceName),
+			semconv.ServiceVersion(serviceVersion),
+		))
+}
+
+func newPropagator() propagation.TextMapPropagator {
+	return propagation.NewCompositeTextMapPropagator(
+		propagation.TraceContext{},
+		propagation.Baggage{},
+	)
+}
+
+func newTraceProvider(res *resource.Resource) (*trace.TracerProvider, error) {
+	traceExporter, err := stdouttrace.New(
+		stdouttrace.WithPrettyPrint())
+	if err != nil {
+		return nil, err
+	}
+
+	traceProvider := trace.NewTracerProvider(
+		trace.WithBatcher(traceExporter,
+			// Default is 5s. Set to 1s for demonstrative purposes.
+			trace.WithBatchTimeout(time.Second)),
+		trace.WithResource(res),
+	)
+	return traceProvider, nil
+}
+
+func newMeterProvider(res *resource.Resource) (*metric.MeterProvider, error) {
+	metricExporter, err := stdoutmetric.New()
+	if err != nil {
+		return nil, err
+	}
+
+	meterProvider := metric.NewMeterProvider(
+		metric.WithResource(res),
+		metric.WithReader(metric.NewPeriodicReader(metricExporter,
+			// Default is 1m. Set to 3s for demonstrative purposes.
+			metric.WithInterval(3*time.Second))),
+	)
+	return meterProvider, nil
+}
diff --git a/example/dice/rolldice.go b/example/dice/rolldice.go
new file mode 100644
index 00000000000..10bd237c325
--- /dev/null
+++ b/example/dice/rolldice.go
@@ -0,0 +1,59 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"io"
+	"log"
+	"math/rand"
+	"net/http"
+	"strconv"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+)
+
+var (
+	tracer  = otel.Tracer("rolldice")
+	meter   = otel.Meter("rolldice")
+	rollCnt metric.Int64Counter
+)
+
+func init() {
+	var err error
+	rollCnt, err = meter.Int64Counter("dice.rolls",
+		metric.WithDescription("The number of rolls by roll value"),
+		metric.WithUnit("{roll}"))
+	if err != nil {
+		panic(err)
+	}
+}
+
+func rolldice(w http.ResponseWriter, r *http.Request) {
+	ctx, span := tracer.Start(r.Context(), "roll")
+	defer span.End()
+
+	roll := 1 + rand.Intn(6)
+
+	rollValueAttr := attribute.Int("roll.value", roll)
+	span.SetAttributes(rollValueAttr)
+	rollCnt.Add(ctx, 1, metric.WithAttributes(rollValueAttr))
+
+	resp := strconv.Itoa(roll) + "\n"
+	if _, err := io.WriteString(w, resp); err != nil {
+		log.Printf("Write failed: %v\n", err)
+	}
+}
diff --git a/example/fib/app.go b/example/fib/app.go
deleted file mode 100644
index a92074258fb..00000000000
--- a/example/fib/app.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"log"
-	"strconv"
-
-	"go.opentelemetry.io/otel"
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/codes"
-	"go.opentelemetry.io/otel/trace"
-)
-
-// name is the Tracer name used to identify this instrumentation library.
-const name = "fib"
-
-// App is an Fibonacci computation application.
-type App struct {
-	r io.Reader
-	l *log.Logger
-}
-
-// NewApp returns a new App.
-func NewApp(r io.Reader, l *log.Logger) *App {
-	return &App{r: r, l: l}
-}
-
-// Run starts polling users for Fibonacci number requests and writes results.
-func (a *App) Run(ctx context.Context) error {
-	for {
-		// Each execution of the run loop, we should get a new "root" span and context.
-		newCtx, span := otel.Tracer(name).Start(ctx, "Run")
-
-		n, err := a.Poll(newCtx)
-		if err != nil {
-			span.End()
-			return err
-		}
-
-		a.Write(newCtx, n)
-		span.End()
-	}
-}
-
-// Poll asks a user for input and returns the request.
-func (a *App) Poll(ctx context.Context) (uint, error) {
-	_, span := otel.Tracer(name).Start(ctx, "Poll")
-	defer span.End()
-
-	a.l.Print("What Fibonacci number would you like to know: ")
-
-	var n uint
-	_, err := fmt.Fscanf(a.r, "%d\n", &n)
-	if err != nil {
-		span.RecordError(err)
-		span.SetStatus(codes.Error, err.Error())
-		return 0, err
-	}
-
-	// Store n as a string to not overflow an int64.
-	nStr := strconv.FormatUint(uint64(n), 10)
-	span.SetAttributes(attribute.String("request.n", nStr))
-
-	return n, nil
-}
-
-// Write writes the n-th Fibonacci number back to the user.
-func (a *App) Write(ctx context.Context, n uint) {
-	var span trace.Span
-	ctx, span = otel.Tracer(name).Start(ctx, "Write")
-	defer span.End()
-
-	f, err := func(ctx context.Context) (uint64, error) {
-		_, span := otel.Tracer(name).Start(ctx, "Fibonacci")
-		defer span.End()
-		f, err := Fibonacci(n)
-		if err != nil {
-			span.RecordError(err)
-			span.SetStatus(codes.Error, err.Error())
-		}
-		return f, err
-	}(ctx)
-	if err != nil {
-		a.l.Printf("Fibonacci(%d): %v\n", n, err)
-	} else {
-		a.l.Printf("Fibonacci(%d) = %d\n", n, f)
-	}
-}
diff --git a/example/fib/go.mod b/example/fib/go.mod
deleted file mode 100644
index bfa54170291..00000000000
--- a/example/fib/go.mod
+++ /dev/null
@@ -1,27 +0,0 @@
-module go.opentelemetry.io/otel/example/fib
-
-go 1.19
-
-require (
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/trace v1.16.0
-)
-
-require (
-	github.com/go-logr/logr v1.2.4 // indirect
-	github.com/go-logr/stdr v1.2.2 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
-)
-
-replace go.opentelemetry.io/otel => ../..
-
-replace go.opentelemetry.io/otel/exporters/stdout/stdouttrace => ../../exporters/stdout/stdouttrace
-
-replace go.opentelemetry.io/otel/sdk => ../../sdk
-
-replace go.opentelemetry.io/otel/trace => ../../trace
-
-replace go.opentelemetry.io/otel/metric => ../../metric
diff --git a/example/fib/go.sum b/example/fib/go.sum
deleted file mode 100644
index bfe2cc3f315..00000000000
--- a/example/fib/go.sum
+++ /dev/null
@@ -1,12 +0,0 @@
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
-github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/example/fib/main.go b/example/fib/main.go
deleted file mode 100644
index 7e23b8eda73..00000000000
--- a/example/fib/main.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"context"
-	"io"
-	"log"
-	"os"
-	"os/signal"
-
-	"go.opentelemetry.io/otel"
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
-	"go.opentelemetry.io/otel/sdk/resource"
-	"go.opentelemetry.io/otel/sdk/trace"
-	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
-)
-
-// newExporter returns a console exporter.
-func newExporter(w io.Writer) (trace.SpanExporter, error) {
-	return stdouttrace.New(
-		stdouttrace.WithWriter(w),
-		// Use human readable output.
-		stdouttrace.WithPrettyPrint(),
-		// Do not print timestamps for the demo.
-		stdouttrace.WithoutTimestamps(),
-	)
-}
-
-// newResource returns a resource describing this application.
-func newResource() *resource.Resource {
-	r, _ := resource.Merge(
-		resource.Default(),
-		resource.NewWithAttributes(
-			semconv.SchemaURL,
-			semconv.ServiceName("fib"),
-			semconv.ServiceVersion("v0.1.0"),
-			attribute.String("environment", "demo"),
-		),
-	)
-	return r
-}
-
-func main() {
-	l := log.New(os.Stdout, "", 0)
-
-	// Write telemetry data to a file.
-	f, err := os.Create("traces.txt")
-	if err != nil {
-		l.Fatal(err)
-	}
-	defer f.Close()
-
-	exp, err := newExporter(f)
-	if err != nil {
-		l.Fatal(err)
-	}
-
-	tp := trace.NewTracerProvider(
-		trace.WithBatcher(exp),
-		trace.WithResource(newResource()),
-	)
-	defer func() {
-		if err := tp.Shutdown(context.Background()); err != nil {
-			l.Fatal(err)
-		}
-	}()
-	otel.SetTracerProvider(tp)
-
-	sigCh := make(chan os.Signal, 1)
-	signal.Notify(sigCh, os.Interrupt)
-
-	errCh := make(chan error)
-	app := NewApp(os.Stdin, l)
-	go func() {
-		errCh <- app.Run(context.Background())
-	}()
-
-	select {
-	case <-sigCh:
-		l.Println("\ngoodbye")
-		return
-	case err := <-errCh:
-		if err != nil {
-			l.Fatal(err)
-		}
-	}
-}
diff --git a/example/jaeger/go.mod b/example/jaeger/go.mod
deleted file mode 100644
index b2be8745d5a..00000000000
--- a/example/jaeger/go.mod
+++ /dev/null
@@ -1,33 +0,0 @@
-// Deprecated: This example is no longer supported as
-// [go.opentelemetry.io/otel/exporters/jaeger] is no longer supported.
-// OpenTelemetry dropped support for Jaeger exporter in July 2023.
-// Jaeger officially accepts and recommends using OTLP.
-// Use [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp]
-// or [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc] instead.
-module go.opentelemetry.io/otel/example/jaeger
-
-go 1.19
-
-replace (
-	go.opentelemetry.io/otel => ../..
-	go.opentelemetry.io/otel/exporters/jaeger => ../../exporters/jaeger
-	go.opentelemetry.io/otel/sdk => ../../sdk
-)
-
-require (
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/exporters/jaeger v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-)
-
-require (
-	github.com/go-logr/logr v1.2.4 // indirect
-	github.com/go-logr/stdr v1.2.2 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	go.opentelemetry.io/otel/trace v1.16.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
-)
-
-replace go.opentelemetry.io/otel/trace => ../../trace
-
-replace go.opentelemetry.io/otel/metric => ../../metric
diff --git a/example/jaeger/go.sum b/example/jaeger/go.sum
deleted file mode 100644
index 8089b278d80..00000000000
--- a/example/jaeger/go.sum
+++ /dev/null
@@ -1,13 +0,0 @@
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
-github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/example/jaeger/main.go b/example/jaeger/main.go
deleted file mode 100644
index d4f41f5f316..00000000000
--- a/example/jaeger/main.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Command jaeger is an example program that creates spans
-// and uploads to Jaeger.
-//
-// Deprecated:  This example is no longer supported as
-// [go.opentelemetry.io/otel/exporters/jaeger] is no longer supported.
-// OpenTelemetry dropped support for Jaeger exporter in July 2023.
-// Jaeger officially accepts and recommends using OTLP.
-// Use [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp]
-// or [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc] instead.
-package main
-
-import (
-	"context"
-	"log"
-	"time"
-
-	"go.opentelemetry.io/otel"
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/exporters/jaeger" //nolint:staticcheck // This is deprecated and will be removed in the next release.
-	"go.opentelemetry.io/otel/sdk/resource"
-	tracesdk "go.opentelemetry.io/otel/sdk/trace"
-	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
-)
-
-const (
-	service     = "trace-demo"
-	environment = "production"
-	id          = 1
-)
-
-// tracerProvider returns an OpenTelemetry TracerProvider configured to use
-// the Jaeger exporter that will send spans to the provided url. The returned
-// TracerProvider will also use a Resource configured with all the information
-// about the application.
-func tracerProvider(url string) (*tracesdk.TracerProvider, error) {
-	// Create the Jaeger exporter
-	exp, err := jaeger.New(jaeger.WithCollectorEndpoint(jaeger.WithEndpoint(url)))
-	if err != nil {
-		return nil, err
-	}
-	tp := tracesdk.NewTracerProvider(
-		// Always be sure to batch in production.
-		tracesdk.WithBatcher(exp),
-		// Record information about this application in a Resource.
-		tracesdk.WithResource(resource.NewWithAttributes(
-			semconv.SchemaURL,
-			semconv.ServiceName(service),
-			attribute.String("environment", environment),
-			attribute.Int64("ID", id),
-		)),
-	)
-	return tp, nil
-}
-
-func main() {
-	tp, err := tracerProvider("http://localhost:14268/api/traces")
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	// Register our TracerProvider as the global so any imported
-	// instrumentation in the future will default to using it.
-	otel.SetTracerProvider(tp)
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	// Cleanly shutdown and flush telemetry when the application exits.
-	defer func(ctx context.Context) {
-		// Do not make the application hang when it is shutdown.
-		ctx, cancel = context.WithTimeout(ctx, time.Second*5)
-		defer cancel()
-		if err := tp.Shutdown(ctx); err != nil {
-			log.Fatal(err)
-		}
-	}(ctx)
-
-	tr := tp.Tracer("component-main")
-
-	ctx, span := tr.Start(ctx, "foo")
-	defer span.End()
-
-	bar(ctx)
-}
-
-func bar(ctx context.Context) {
-	// Use the global TracerProvider.
-	tr := otel.Tracer("component-bar")
-	_, span := tr.Start(ctx, "bar")
-	span.SetAttributes(attribute.Key("testset").String("value"))
-	defer span.End()
-
-	// Do bar...
-}
diff --git a/example/namedtracer/foo/foo.go b/example/namedtracer/foo/foo.go
index 1193f8ad018..074912e26e1 100644
--- a/example/namedtracer/foo/foo.go
+++ b/example/namedtracer/foo/foo.go
@@ -22,9 +22,7 @@ import (
 	"go.opentelemetry.io/otel/trace"
 )
 
-var (
-	lemonsKey = attribute.Key("ex.com/lemons")
-)
+var lemonsKey = attribute.Key("ex.com/lemons")
 
 // SubOperation is an example to demonstrate the use of named tracer.
 // It creates a named tracer with its package path.
diff --git a/example/namedtracer/go.mod b/example/namedtracer/go.mod
index 40a4ca0d780..9b29c30a388 100644
--- a/example/namedtracer/go.mod
+++ b/example/namedtracer/go.mod
@@ -1,6 +1,6 @@
 module go.opentelemetry.io/otel/example/namedtracer
 
-go 1.19
+go 1.20
 
 replace (
 	go.opentelemetry.io/otel => ../..
@@ -9,16 +9,16 @@ replace (
 
 require (
 	github.com/go-logr/stdr v1.2.2
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/trace v1.16.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/trace v1.21.0
 )
 
 require (
-	github.com/go-logr/logr v1.2.4 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
 )
 
 replace go.opentelemetry.io/otel/trace => ../../trace
diff --git a/example/namedtracer/go.sum b/example/namedtracer/go.sum
index bfe2cc3f315..8fd671228e6 100644
--- a/example/namedtracer/go.sum
+++ b/example/namedtracer/go.sum
@@ -1,12 +1,12 @@
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/example/opencensus/go.mod b/example/opencensus/go.mod
index 5807ecfd1f2..08f23c9f233 100644
--- a/example/opencensus/go.mod
+++ b/example/opencensus/go.mod
@@ -1,6 +1,6 @@
 module go.opentelemetry.io/otel/example/opencensus
 
-go 1.19
+go 1.20
 
 replace (
 	go.opentelemetry.io/otel => ../..
@@ -10,21 +10,21 @@ replace (
 
 require (
 	go.opencensus.io v0.24.0
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/bridge/opencensus v0.39.0
-	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.39.0
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/sdk/metric v0.39.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/bridge/opencensus v0.44.0
+	go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.44.0
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/sdk/metric v1.21.0
 )
 
 require (
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	go.opentelemetry.io/otel/trace v1.16.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	go.opentelemetry.io/otel/trace v1.21.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
 )
 
 replace go.opentelemetry.io/otel/metric => ../../metric
diff --git a/example/opencensus/go.sum b/example/opencensus/go.sum
index 8661ceb6298..6e76c6aed70 100644
--- a/example/opencensus/go.sum
+++ b/example/opencensus/go.sum
@@ -11,8 +11,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -34,7 +34,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -68,8 +68,8 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
diff --git a/example/opencensus/main.go b/example/opencensus/main.go
index c9709bad37a..4d1d1d04beb 100644
--- a/example/opencensus/main.go
+++ b/example/opencensus/main.go
@@ -78,8 +78,7 @@ func tracing(otExporter sdktrace.SpanExporter) {
 	otel.SetTracerProvider(tp)
 
 	log.Println("Installing the OpenCensus bridge to make OpenCensus libraries write spans using OpenTelemetry.")
-	tracer := tp.Tracer("simple")
-	octrace.DefaultTracer = opencensus.NewTracer(tracer)
+	opencensus.InstallTraceBridge()
 	tp.ForceFlush(ctx)
 
 	log.Println("Creating OpenCensus span, which should be printed out using the OpenTelemetry stdouttrace exporter.\n-- It should have no parent, since it is the first span.")
@@ -88,7 +87,7 @@ func tracing(otExporter sdktrace.SpanExporter) {
 	tp.ForceFlush(ctx)
 
 	log.Println("Creating OpenTelemetry span\n-- It should have the OpenCensus span as a parent, since the OpenCensus span was written with using OpenTelemetry APIs.")
-	ctx, otspan := tracer.Start(ctx, "OpenTelemetrySpan")
+	ctx, otspan := tp.Tracer("simple").Start(ctx, "OpenTelemetrySpan")
 	otspan.End()
 	tp.ForceFlush(ctx)
 
diff --git a/example/otel-collector/go.mod b/example/otel-collector/go.mod
index c2d967bda81..160f549e5c4 100644
--- a/example/otel-collector/go.mod
+++ b/example/otel-collector/go.mod
@@ -1,6 +1,6 @@
 module go.opentelemetry.io/otel/example/otel-collector
 
-go 1.19
+go 1.20
 
 replace (
 	go.opentelemetry.io/otel => ../..
@@ -8,27 +8,27 @@ replace (
 )
 
 require (
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/trace v1.16.0
-	google.golang.org/grpc v1.57.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/trace v1.21.0
+	google.golang.org/grpc v1.59.0
 )
 
 require (
 	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
-	golang.org/x/net v0.10.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
-	golang.org/x/text v0.9.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
+	golang.org/x/net v0.17.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
+	golang.org/x/text v0.13.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 )
 
diff --git a/example/otel-collector/go.sum b/example/otel-collector/go.sum
index 9d85029550d..b4b0ab6d9a6 100644
--- a/example/otel-collector/go.sum
+++ b/example/otel-collector/go.sum
@@ -2,37 +2,37 @@ github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqy
 github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
+github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
 go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
-go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
-golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e h1:Ao9GzfUMPH3zjVfzXG5rlWlk+Q8MXWKwWpwVQE1MXfw=
-google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM=
-google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
-google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
-google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
+google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
+google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
diff --git a/example/passthrough/go.mod b/example/passthrough/go.mod
index 4d1ead190fb..46bcb0f0da0 100644
--- a/example/passthrough/go.mod
+++ b/example/passthrough/go.mod
@@ -1,19 +1,19 @@
 module go.opentelemetry.io/otel/example/passthrough
 
-go 1.19
+go 1.20
 
 require (
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/trace v1.16.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/trace v1.21.0
 )
 
 require (
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
 )
 
 replace (
diff --git a/example/passthrough/go.sum b/example/passthrough/go.sum
index bfe2cc3f315..8fd671228e6 100644
--- a/example/passthrough/go.sum
+++ b/example/passthrough/go.sum
@@ -1,12 +1,12 @@
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/example/prometheus/go.mod b/example/prometheus/go.mod
index 510392cdfce..ee7193eac17 100644
--- a/example/prometheus/go.mod
+++ b/example/prometheus/go.mod
@@ -1,28 +1,28 @@
 module go.opentelemetry.io/otel/example/prometheus
 
-go 1.19
+go 1.20
 
 require (
-	github.com/prometheus/client_golang v1.16.0
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/exporters/prometheus v0.39.0
-	go.opentelemetry.io/otel/metric v1.16.0
-	go.opentelemetry.io/otel/sdk/metric v0.39.0
+	github.com/prometheus/client_golang v1.17.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/exporters/prometheus v0.44.0
+	go.opentelemetry.io/otel/metric v1.21.0
+	go.opentelemetry.io/otel/sdk/metric v1.21.0
 )
 
 require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
-	github.com/prometheus/client_model v0.4.0 // indirect
-	github.com/prometheus/common v0.42.0 // indirect
-	github.com/prometheus/procfs v0.10.1 // indirect
-	go.opentelemetry.io/otel/sdk v1.16.0 // indirect
-	go.opentelemetry.io/otel/trace v1.16.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
+	github.com/prometheus/client_model v0.5.0 // indirect
+	github.com/prometheus/common v0.44.0 // indirect
+	github.com/prometheus/procfs v0.11.1 // indirect
+	go.opentelemetry.io/otel/sdk v1.21.0 // indirect
+	go.opentelemetry.io/otel/trace v1.21.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 )
 
diff --git a/example/prometheus/go.sum b/example/prometheus/go.sum
index e5afa2cf689..5a19e44b683 100644
--- a/example/prometheus/go.sum
+++ b/example/prometheus/go.sum
@@ -4,8 +4,8 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -13,22 +13,22 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
 github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
-github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
-github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
-github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
-github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
-github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
-github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
-github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
+github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
+github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
diff --git a/example/prometheus/main.go b/example/prometheus/main.go
index 3c7e4db7976..777135cebc2 100644
--- a/example/prometheus/main.go
+++ b/example/prometheus/main.go
@@ -32,6 +32,8 @@ import (
 	"go.opentelemetry.io/otel/sdk/metric"
 )
 
+const meterName = "github.com/open-telemetry/opentelemetry-go/example/prometheus"
+
 func main() {
 	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
 	ctx := context.Background()
@@ -44,7 +46,7 @@ func main() {
 		log.Fatal(err)
 	}
 	provider := metric.NewMeterProvider(metric.WithReader(exporter))
-	meter := provider.Meter("github.com/open-telemetry/opentelemetry-go/example/prometheus")
+	meter := provider.Meter(meterName)
 
 	// Start the prometheus HTTP server and pass the exporter Collector to it
 	go serveMetrics()
@@ -75,14 +77,18 @@ func main() {
 	}
 
 	// This is the equivalent of prometheus.NewHistogramVec
-	histogram, err := meter.Float64Histogram("baz", api.WithDescription("a very nice histogram"))
+	histogram, err := meter.Float64Histogram(
+		"baz",
+		api.WithDescription("a histogram with custom buckets and rename"),
+		api.WithExplicitBucketBoundaries(64, 128, 256, 512, 1024, 2048, 4096),
+	)
 	if err != nil {
 		log.Fatal(err)
 	}
-	histogram.Record(ctx, 23, opt)
-	histogram.Record(ctx, 7, opt)
-	histogram.Record(ctx, 101, opt)
-	histogram.Record(ctx, 105, opt)
+	histogram.Record(ctx, 136, opt)
+	histogram.Record(ctx, 64, opt)
+	histogram.Record(ctx, 701, opt)
+	histogram.Record(ctx, 830, opt)
 
 	ctx, _ = signal.NotifyContext(ctx, os.Interrupt)
 	<-ctx.Done()
@@ -91,7 +97,7 @@ func main() {
 func serveMetrics() {
 	log.Printf("serving metrics at localhost:2223/metrics")
 	http.Handle("/metrics", promhttp.Handler())
-	err := http.ListenAndServe(":2223", nil)
+	err := http.ListenAndServe(":2223", nil) //nolint:gosec // Ignoring G114: Use of net/http serve function that has no support for setting timeouts.
 	if err != nil {
 		fmt.Printf("error serving http: %v", err)
 		return
diff --git a/example/view/go.mod b/example/view/go.mod
deleted file mode 100644
index bfac7cea8dd..00000000000
--- a/example/view/go.mod
+++ /dev/null
@@ -1,39 +0,0 @@
-module go.opentelemetry.io/otel/example/view
-
-go 1.19
-
-require (
-	github.com/prometheus/client_golang v1.16.0
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/exporters/prometheus v0.39.0
-	go.opentelemetry.io/otel/metric v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/sdk/metric v0.39.0
-)
-
-require (
-	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
-	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
-	github.com/prometheus/client_model v0.4.0 // indirect
-	github.com/prometheus/common v0.42.0 // indirect
-	github.com/prometheus/procfs v0.10.1 // indirect
-	go.opentelemetry.io/otel/trace v1.16.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
-	google.golang.org/protobuf v1.31.0 // indirect
-)
-
-replace go.opentelemetry.io/otel => ../..
-
-replace go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus
-
-replace go.opentelemetry.io/otel/sdk => ../../sdk
-
-replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric
-
-replace go.opentelemetry.io/otel/metric => ../../metric
-
-replace go.opentelemetry.io/otel/trace => ../../trace
diff --git a/example/view/go.sum b/example/view/go.sum
deleted file mode 100644
index e5afa2cf689..00000000000
--- a/example/view/go.sum
+++ /dev/null
@@ -1,37 +0,0 @@
-github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
-github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
-github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
-github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
-github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
-github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
-github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
-github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
-github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
-github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
-google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/example/view/main.go b/example/view/main.go
deleted file mode 100644
index 712e325301e..00000000000
--- a/example/view/main.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package main
-
-import (
-	"context"
-	"fmt"
-	"log"
-	"net/http"
-	"os"
-	"os/signal"
-
-	"github.com/prometheus/client_golang/prometheus/promhttp"
-
-	"go.opentelemetry.io/otel/attribute"
-	otelprom "go.opentelemetry.io/otel/exporters/prometheus"
-	api "go.opentelemetry.io/otel/metric"
-	"go.opentelemetry.io/otel/sdk/instrumentation"
-	"go.opentelemetry.io/otel/sdk/metric"
-)
-
-const meterName = "github.com/open-telemetry/opentelemetry-go/example/view"
-
-func main() {
-	ctx := context.Background()
-
-	// The exporter embeds a default OpenTelemetry Reader, allowing it to be used in WithReader.
-	exporter, err := otelprom.New()
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	provider := metric.NewMeterProvider(
-		metric.WithReader(exporter),
-		// View to customize histogram buckets and rename a single histogram instrument.
-		metric.WithView(metric.NewView(
-			metric.Instrument{
-				Name:  "custom_histogram",
-				Scope: instrumentation.Scope{Name: meterName},
-			},
-			metric.Stream{
-				Name: "bar",
-				Aggregation: metric.AggregationExplicitBucketHistogram{
-					Boundaries: []float64{64, 128, 256, 512, 1024, 2048, 4096},
-				},
-			},
-		)),
-	)
-	meter := provider.Meter(meterName)
-
-	// Start the prometheus HTTP server and pass the exporter Collector to it
-	go serveMetrics()
-
-	opt := api.WithAttributes(
-		attribute.Key("A").String("B"),
-		attribute.Key("C").String("D"),
-	)
-
-	counter, err := meter.Float64Counter("foo", api.WithDescription("a simple counter"))
-	if err != nil {
-		log.Fatal(err)
-	}
-	counter.Add(ctx, 5, opt)
-
-	histogram, err := meter.Float64Histogram("custom_histogram", api.WithDescription("a histogram with custom buckets and rename"))
-	if err != nil {
-		log.Fatal(err)
-	}
-	histogram.Record(ctx, 136, opt)
-	histogram.Record(ctx, 64, opt)
-	histogram.Record(ctx, 701, opt)
-	histogram.Record(ctx, 830, opt)
-
-	ctx, _ = signal.NotifyContext(ctx, os.Interrupt)
-	<-ctx.Done()
-}
-
-func serveMetrics() {
-	log.Printf("serving metrics at localhost:2222/metrics")
-	http.Handle("/metrics", promhttp.Handler())
-	err := http.ListenAndServe(":2222", nil)
-	if err != nil {
-		fmt.Printf("error serving http: %v", err)
-		return
-	}
-}
diff --git a/example/zipkin/go.mod b/example/zipkin/go.mod
index e86ed2ddd7b..8f638e80419 100644
--- a/example/zipkin/go.mod
+++ b/example/zipkin/go.mod
@@ -1,6 +1,6 @@
 module go.opentelemetry.io/otel/example/zipkin
 
-go 1.19
+go 1.20
 
 replace (
 	go.opentelemetry.io/otel => ../..
@@ -9,18 +9,18 @@ replace (
 )
 
 require (
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/exporters/zipkin v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/trace v1.16.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/exporters/zipkin v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/trace v1.21.0
 )
 
 require (
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/openzipkin/zipkin-go v0.4.2 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
 )
 
 replace go.opentelemetry.io/otel/trace => ../../trace
diff --git a/example/zipkin/go.sum b/example/zipkin/go.sum
index 5a0de303faf..24549e67bc2 100644
--- a/example/zipkin/go.sum
+++ b/example/zipkin/go.sum
@@ -1,14 +1,14 @@
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA=
 github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/exporters/README.md b/exporters/README.md
index 58561902d43..995226c8d55 100644
--- a/exporters/README.md
+++ b/exporters/README.md
@@ -7,14 +7,16 @@ This package contains exporters for this purpose.
 
 The following exporter packages are provided with the following OpenTelemetry signal support.
 
-| Exporter Package                                                                | Metrics | Traces |
-| :-----------------------------------------------------------------------------: | :-----: | :----: |
-| [go.opentelemetry.io/otel/exporters/otlp/otlpmetric](./otlp/otlpmetric)         | ✓       |        |
-| [go.opentelemetry.io/otel/exporters/otlp/otlptrace](./otlp/otlptrace)           |         | ✓      |
-| [go.opentelemetry.io/otel/exporters/prometheus](./prometheus)                   | ✓       |        |
-| [go.opentelemetry.io/otel/exporters/stdout/stdoutmetric](./stdout/stdoutmetric) | ✓       |        |
-| [go.opentelemetry.io/otel/exporters/stdout/stdouttrace](./stdout/stdouttrace)   |         | ✓      |
-| [go.opentelemetry.io/otel/exporters/zipkin](./zipkin)                           |         | ✓      |
+|                                           Exporter Package                                            | Metrics | Traces |
+|:-----------------------------------------------------------------------------------------------------:|:-------:|:------:|
+| [go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc](./otlp/otlpmetric/otlpmetricgrpc) |    ✓    |        |
+| [go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp](./otlp/otlpmetric/otlpmetrichttp) |    ✓    |        |
+|   [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](./otlp/otlptrace/otlptracegrpc)   |         |   ✓    |
+|   [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](./otlp/otlptrace/otlptracehttp)   |         |   ✓    |
+|                     [go.opentelemetry.io/otel/exporters/prometheus](./prometheus)                     |    ✓    |        |
+|            [go.opentelemetry.io/otel/exporters/stdout/stdoutmetric](./stdout/stdoutmetric)            |    ✓    |        |
+|             [go.opentelemetry.io/otel/exporters/stdout/stdouttrace](./stdout/stdouttrace)             |         |   ✓    |
+|                         [go.opentelemetry.io/otel/exporters/zipkin](./zipkin)                         |         |   ✓    |
 
 See the [OpenTelemetry registry] for 3rd-party exporters compatible with this project.
 
diff --git a/exporters/jaeger/README.md b/exporters/jaeger/README.md
deleted file mode 100644
index 439bf79a90f..00000000000
--- a/exporters/jaeger/README.md
+++ /dev/null
@@ -1,56 +0,0 @@
-# OpenTelemetry-Go Jaeger Exporter
-
-[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/jaeger.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger)
-
-> **Deprecated:** This module is no longer supported.
-> OpenTelemetry dropped support for Jaeger exporter in July 2023.
-> Jaeger officially accepts and recommends using OTLP.
-> Use [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp)
-> or [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc) instead.
-
-[OpenTelemetry span exporter for Jaeger](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/sdk_exporters/jaeger.md) implementation.
-
-## Installation
-
-```
-go get -u go.opentelemetry.io/otel/exporters/jaeger
-```
-
-## Example
-
-See [../../example/jaeger](../../example/jaeger).
-
-## Configuration
-
-The exporter can be used to send spans to:
-
-- Jaeger agent using `jaeger.thrift` over compact thrift protocol via
-  [`WithAgentEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentEndpoint) option.
-- Jaeger collector using `jaeger.thrift` over HTTP via
-  [`WithCollectorEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithCollectorEndpoint) option.
-
-### Environment Variables
-
-The following environment variables can be used
-(instead of options objects) to override the default configuration.
-
-| Environment variable              | Option                                                                                        | Default value                       |
-| --------------------------------- | --------------------------------------------------------------------------------------------- | ----------------------------------- |
-| `OTEL_EXPORTER_JAEGER_AGENT_HOST` | [`WithAgentHost`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentHost) | `localhost`                         |
-| `OTEL_EXPORTER_JAEGER_AGENT_PORT` | [`WithAgentPort`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithAgentPort) | `6831`                              |
-| `OTEL_EXPORTER_JAEGER_ENDPOINT`   | [`WithEndpoint`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithEndpoint)   | `http://localhost:14268/api/traces` |
-| `OTEL_EXPORTER_JAEGER_USER`       | [`WithUsername`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithUsername)   |                                     |
-| `OTEL_EXPORTER_JAEGER_PASSWORD`   | [`WithPassword`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/jaeger#WithPassword)   |                                     |
-
-Configuration using options have precedence over the environment variables.
-
-## Contributing
-
-This exporter uses a vendored copy of the Apache Thrift library (v0.14.1) at a custom import path.
-When re-generating Thrift code in the future, please adapt import paths as necessary.
-
-## References
-
-- [Jaeger](https://www.jaegertracing.io/)
-- [OpenTelemetry to Jaeger Transformation](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/sdk_exporters/jaeger.md)
-- [OpenTelemetry Environment Variable Specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/sdk-environment-variables.md#jaeger-exporter)
diff --git a/exporters/jaeger/agent.go b/exporters/jaeger/agent.go
deleted file mode 100644
index a050020bb47..00000000000
--- a/exporters/jaeger/agent.go
+++ /dev/null
@@ -1,213 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"net"
-	"strings"
-	"time"
-
-	"github.com/go-logr/logr"
-
-	genAgent "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent"
-	gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-const (
-	// udpPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent.
-	udpPacketMaxLength = 65000
-	// emitBatchOverhead is the additional overhead bytes used for enveloping the datagram,
-	// synced with jaeger-agent https://github.com/jaegertracing/jaeger-client-go/blob/master/transport_udp.go#L37
-	emitBatchOverhead = 70
-)
-
-// agentClientUDP is a UDP client to Jaeger agent that implements gen.Agent interface.
-type agentClientUDP struct {
-	genAgent.Agent
-	io.Closer
-
-	connUDP        udpConn
-	client         *genAgent.AgentClient
-	maxPacketSize  int                   // max size of datagram in bytes
-	thriftBuffer   *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
-	thriftProtocol thrift.TProtocol
-}
-
-type udpConn interface {
-	Write([]byte) (int, error)
-	SetWriteBuffer(int) error
-	Close() error
-}
-
-type agentClientUDPParams struct {
-	Host                     string
-	Port                     string
-	MaxPacketSize            int
-	Logger                   logr.Logger
-	AttemptReconnecting      bool
-	AttemptReconnectInterval time.Duration
-}
-
-// newAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP.
-func newAgentClientUDP(params agentClientUDPParams) (*agentClientUDP, error) {
-	hostPort := net.JoinHostPort(params.Host, params.Port)
-	// validate hostport
-	if _, _, err := net.SplitHostPort(hostPort); err != nil {
-		return nil, err
-	}
-
-	if params.MaxPacketSize <= 0 || params.MaxPacketSize > udpPacketMaxLength {
-		params.MaxPacketSize = udpPacketMaxLength
-	}
-
-	if params.AttemptReconnecting && params.AttemptReconnectInterval <= 0 {
-		params.AttemptReconnectInterval = time.Second * 30
-	}
-
-	thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize)
-	protocolFactory := thrift.NewTCompactProtocolFactoryConf(&thrift.TConfiguration{})
-	thriftProtocol := protocolFactory.GetProtocol(thriftBuffer)
-	client := genAgent.NewAgentClientFactory(thriftBuffer, protocolFactory)
-
-	var connUDP udpConn
-	var err error
-
-	if params.AttemptReconnecting {
-		// host is hostname, setup resolver loop in case host record changes during operation
-		connUDP, err = newReconnectingUDPConn(hostPort, params.MaxPacketSize, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger)
-		if err != nil {
-			return nil, err
-		}
-	} else {
-		destAddr, err := net.ResolveUDPAddr("udp", hostPort)
-		if err != nil {
-			return nil, err
-		}
-
-		connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil {
-		return nil, err
-	}
-
-	return &agentClientUDP{
-		connUDP:        connUDP,
-		client:         client,
-		maxPacketSize:  params.MaxPacketSize,
-		thriftBuffer:   thriftBuffer,
-		thriftProtocol: thriftProtocol,
-	}, nil
-}
-
-// EmitBatch buffers batch to fit into UDP packets and sends the data to the agent.
-func (a *agentClientUDP) EmitBatch(ctx context.Context, batch *gen.Batch) error {
-	var errs []error
-	processSize, err := a.calcSizeOfSerializedThrift(ctx, batch.Process)
-	if err != nil {
-		// drop the batch if serialization of process fails.
-		return err
-	}
-
-	maxPacketSize := a.maxPacketSize
-	if maxPacketSize > udpPacketMaxLength-emitBatchOverhead {
-		maxPacketSize = udpPacketMaxLength - emitBatchOverhead
-	}
-	totalSize := processSize
-	var spans []*gen.Span
-	for _, span := range batch.Spans {
-		spanSize, err := a.calcSizeOfSerializedThrift(ctx, span)
-		if err != nil {
-			errs = append(errs, fmt.Errorf("thrift serialization failed: %v", span))
-			continue
-		}
-		if spanSize+processSize >= maxPacketSize {
-			// drop the span that exceeds the limit.
-			errs = append(errs, fmt.Errorf("span too large to send: %v", span))
-			continue
-		}
-		if totalSize+spanSize >= maxPacketSize {
-			if err := a.flush(ctx, &gen.Batch{
-				Process: batch.Process,
-				Spans:   spans,
-			}); err != nil {
-				errs = append(errs, err)
-			}
-			spans = spans[:0]
-			totalSize = processSize
-		}
-		totalSize += spanSize
-		spans = append(spans, span)
-	}
-
-	if len(spans) > 0 {
-		if err := a.flush(ctx, &gen.Batch{
-			Process: batch.Process,
-			Spans:   spans,
-		}); err != nil {
-			errs = append(errs, err)
-		}
-	}
-
-	if len(errs) == 1 {
-		return errs[0]
-	} else if len(errs) > 1 {
-		joined := a.makeJoinedErrorString(errs)
-		return fmt.Errorf("multiple errors during transform: %s", joined)
-	}
-	return nil
-}
-
-// makeJoinedErrorString join all the errors to one error message.
-func (a *agentClientUDP) makeJoinedErrorString(errs []error) string {
-	var errMsgs []string
-	for _, err := range errs {
-		errMsgs = append(errMsgs, err.Error())
-	}
-	return strings.Join(errMsgs, ", ")
-}
-
-// flush will send the batch of spans to the agent.
-func (a *agentClientUDP) flush(ctx context.Context, batch *gen.Batch) error {
-	a.thriftBuffer.Reset()
-	if err := a.client.EmitBatch(ctx, batch); err != nil {
-		return err
-	}
-	if a.thriftBuffer.Len() > a.maxPacketSize {
-		return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d",
-			a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans))
-	}
-	_, err := a.connUDP.Write(a.thriftBuffer.Bytes())
-	return err
-}
-
-// calcSizeOfSerializedThrift calculate the serialized thrift packet size.
-func (a *agentClientUDP) calcSizeOfSerializedThrift(ctx context.Context, thriftStruct thrift.TStruct) (int, error) {
-	a.thriftBuffer.Reset()
-	err := thriftStruct.Write(ctx, a.thriftProtocol)
-	return a.thriftBuffer.Len(), err
-}
-
-// Close implements Close() of io.Closer and closes the underlying UDP connection.
-func (a *agentClientUDP) Close() error {
-	return a.connUDP.Close()
-}
diff --git a/exporters/jaeger/agent_test.go b/exporters/jaeger/agent_test.go
deleted file mode 100644
index 95070341722..00000000000
--- a/exporters/jaeger/agent_test.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
-	"context"
-	"net"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	"go.opentelemetry.io/otel"
-	"go.opentelemetry.io/otel/sdk/trace/tracetest"
-)
-
-func TestNewAgentClientUDPWithParamsBadHostport(t *testing.T) {
-	agentClient, err := newAgentClientUDP(agentClientUDPParams{
-		Host: "blahblah",
-		Port: "",
-	})
-	assert.Error(t, err)
-	assert.Nil(t, agentClient)
-}
-
-func TestNewAgentClientUDPWithParams(t *testing.T) {
-	mockServer, err := newUDPListener()
-	require.NoError(t, err)
-	defer mockServer.Close()
-	host, port, err := net.SplitHostPort(mockServer.LocalAddr().String())
-	assert.NoError(t, err)
-
-	agentClient, err := newAgentClientUDP(agentClientUDPParams{
-		Host:                host,
-		Port:                port,
-		MaxPacketSize:       25000,
-		AttemptReconnecting: true,
-	})
-	assert.NoError(t, err)
-	assert.NotNil(t, agentClient)
-	assert.Equal(t, 25000, agentClient.maxPacketSize)
-
-	if assert.IsType(t, &reconnectingUDPConn{}, agentClient.connUDP) {
-		assert.Equal(t, emptyLogger, agentClient.connUDP.(*reconnectingUDPConn).logger)
-	}
-
-	assert.NoError(t, agentClient.Close())
-}
-
-func TestNewAgentClientUDPWithParamsDefaults(t *testing.T) {
-	mockServer, err := newUDPListener()
-	require.NoError(t, err)
-	defer mockServer.Close()
-	host, port, err := net.SplitHostPort(mockServer.LocalAddr().String())
-	assert.NoError(t, err)
-
-	agentClient, err := newAgentClientUDP(agentClientUDPParams{
-		Host:                host,
-		Port:                port,
-		AttemptReconnecting: true,
-	})
-	assert.NoError(t, err)
-	assert.NotNil(t, agentClient)
-	assert.Equal(t, udpPacketMaxLength, agentClient.maxPacketSize)
-
-	if assert.IsType(t, &reconnectingUDPConn{}, agentClient.connUDP) {
-		assert.Equal(t, emptyLogger, agentClient.connUDP.(*reconnectingUDPConn).logger)
-	}
-
-	assert.NoError(t, agentClient.Close())
-}
-
-func TestNewAgentClientUDPWithParamsReconnectingDisabled(t *testing.T) {
-	mockServer, err := newUDPListener()
-	require.NoError(t, err)
-	defer mockServer.Close()
-	host, port, err := net.SplitHostPort(mockServer.LocalAddr().String())
-	assert.NoError(t, err)
-
-	agentClient, err := newAgentClientUDP(agentClientUDPParams{
-		Host:                host,
-		Port:                port,
-		Logger:              emptyLogger,
-		AttemptReconnecting: false,
-	})
-	assert.NoError(t, err)
-	assert.NotNil(t, agentClient)
-	assert.Equal(t, udpPacketMaxLength, agentClient.maxPacketSize)
-
-	assert.IsType(t, &net.UDPConn{}, agentClient.connUDP)
-
-	assert.NoError(t, agentClient.Close())
-}
-
-type errorHandler struct{ t *testing.T }
-
-func (eh errorHandler) Handle(err error) { assert.NoError(eh.t, err) }
-
-func TestJaegerAgentUDPLimitBatching(t *testing.T) {
-	otel.SetErrorHandler(errorHandler{t})
-
-	mockServer, err := newUDPListener()
-	require.NoError(t, err)
-	defer mockServer.Close()
-	host, port, err := net.SplitHostPort(mockServer.LocalAddr().String())
-	assert.NoError(t, err)
-
-	// 1500 spans, size 79559, does not fit within one UDP packet with the default size of 65000.
-	n := 1500
-	s := make(tracetest.SpanStubs, n).Snapshots()
-
-	exp, err := New(
-		WithAgentEndpoint(WithAgentHost(host), WithAgentPort(port)),
-	)
-	require.NoError(t, err)
-
-	ctx := context.Background()
-	assert.NoError(t, exp.ExportSpans(ctx, s))
-	assert.NoError(t, exp.Shutdown(ctx))
-}
-
-// generateALargeSpan generates a span with a long name.
-func generateALargeSpan() tracetest.SpanStub {
-	return tracetest.SpanStub{
-		Name: "a-longer-name-that-makes-it-exceeds-limit",
-	}
-}
-
-func TestSpanExceedsMaxPacketLimit(t *testing.T) {
-	otel.SetErrorHandler(errorHandler{t})
-
-	mockServer, err := newUDPListener()
-	require.NoError(t, err)
-	defer mockServer.Close()
-	host, port, err := net.SplitHostPort(mockServer.LocalAddr().String())
-	assert.NoError(t, err)
-
-	// 106 is the serialized size of a span with default values.
-	maxSize := 106
-
-	largeSpans := tracetest.SpanStubs{generateALargeSpan(), {}}.Snapshots()
-	normalSpans := tracetest.SpanStubs{{}, {}}.Snapshots()
-
-	exp, err := New(
-		WithAgentEndpoint(WithAgentHost(host), WithAgentPort(port), WithMaxPacketSize(maxSize+1)),
-	)
-	require.NoError(t, err)
-
-	ctx := context.Background()
-	assert.Error(t, exp.ExportSpans(ctx, largeSpans))
-	assert.NoError(t, exp.ExportSpans(ctx, normalSpans))
-	assert.NoError(t, exp.Shutdown(ctx))
-}
-
-func TestEmitBatchWithMultipleErrors(t *testing.T) {
-	otel.SetErrorHandler(errorHandler{t})
-
-	mockServer, err := newUDPListener()
-	require.NoError(t, err)
-	defer mockServer.Close()
-	host, port, err := net.SplitHostPort(mockServer.LocalAddr().String())
-	assert.NoError(t, err)
-
-	span := generateALargeSpan()
-	largeSpans := tracetest.SpanStubs{span, span}.Snapshots()
-	// make max packet size smaller than span
-	maxSize := len(span.Name)
-	exp, err := New(
-		WithAgentEndpoint(WithAgentHost(host), WithAgentPort(port), WithMaxPacketSize(maxSize)),
-	)
-	require.NoError(t, err)
-
-	ctx := context.Background()
-	err = exp.ExportSpans(ctx, largeSpans)
-	assert.Error(t, err)
-	require.Contains(t, err.Error(), "multiple errors")
-}
diff --git a/exporters/jaeger/assertsocketbuffersize_test.go b/exporters/jaeger/assertsocketbuffersize_test.go
deleted file mode 100644
index 8d21f6550df..00000000000
--- a/exporters/jaeger/assertsocketbuffersize_test.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows
-// +build !windows
-
-package jaeger
-
-import (
-	"net"
-	"runtime"
-	"syscall"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func assertSockBufferSize(t *testing.T, expectedBytes int, conn *net.UDPConn) bool {
-	fd, err := conn.File()
-	if !assert.NoError(t, err) {
-		return false
-	}
-
-	bufferBytes, err := syscall.GetsockoptInt(int(fd.Fd()), syscall.SOL_SOCKET, syscall.SO_SNDBUF)
-	if !assert.NoError(t, err) {
-		return false
-	}
-
-	// The linux kernel doubles SO_SNDBUF value (to allow space for
-	// bookkeeping overhead) when it is set using setsockopt(2), and this
-	// doubled value is returned by getsockopt(2)
-	// https://linux.die.net/man/7/socket
-	if runtime.GOOS == "linux" {
-		return assert.GreaterOrEqual(t, expectedBytes*2, bufferBytes)
-	}
-
-	return assert.Equal(t, expectedBytes, bufferBytes)
-}
diff --git a/exporters/jaeger/assertsocketbuffersize_windows_test.go b/exporters/jaeger/assertsocketbuffersize_windows_test.go
deleted file mode 100644
index 9fba4ba4d34..00000000000
--- a/exporters/jaeger/assertsocketbuffersize_windows_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build windows
-
-package jaeger
-
-import (
-	"net"
-	"testing"
-)
-
-func assertSockBufferSize(t *testing.T, expectedBytes int, conn *net.UDPConn) bool {
-	// The Windows implementation of the net.UDPConn does not implement the
-	// functionality to return a file handle, instead a "not supported" error
-	// is returned:
-	//
-	// https://github.com/golang/go/blob/6cc8aa7ece96aca282db19f08aa5c98ed13695d9/src/net/fd_windows.go#L175-L178
-	//
-	// This means we are not able to pass the connection to a syscall and
-	// determine the buffer size.
-	return true
-}
diff --git a/exporters/jaeger/doc.go b/exporters/jaeger/doc.go
deleted file mode 100644
index a7359654110..00000000000
--- a/exporters/jaeger/doc.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package jaeger contains an OpenTelemetry tracing exporter for Jaeger.
-//
-// Deprecated: This module is no longer supported.
-// OpenTelemetry dropped support for Jaeger exporter in July 2023.
-// Jaeger officially accepts and recommends using OTLP.
-// Use [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp]
-// or [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc] instead.
-package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
diff --git a/exporters/jaeger/env.go b/exporters/jaeger/env.go
deleted file mode 100644
index 460fb5e1352..00000000000
--- a/exporters/jaeger/env.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
-
-import (
-	"os"
-)
-
-// Environment variable names.
-const (
-	// Hostname for the Jaeger agent, part of address where exporter sends spans
-	// i.e.	"localhost".
-	envAgentHost = "OTEL_EXPORTER_JAEGER_AGENT_HOST"
-	// Port for the Jaeger agent, part of address where exporter sends spans
-	// i.e. 6831.
-	envAgentPort = "OTEL_EXPORTER_JAEGER_AGENT_PORT"
-	// The HTTP endpoint for sending spans directly to a collector,
-	// i.e. http://jaeger-collector:14268/api/traces.
-	envEndpoint = "OTEL_EXPORTER_JAEGER_ENDPOINT"
-	// Username to send as part of "Basic" authentication to the collector endpoint.
-	envUser = "OTEL_EXPORTER_JAEGER_USER"
-	// Password to send as part of "Basic" authentication to the collector endpoint.
-	envPassword = "OTEL_EXPORTER_JAEGER_PASSWORD"
-)
-
-// envOr returns an env variable's value if it is exists or the default if not.
-func envOr(key, defaultValue string) string {
-	if v := os.Getenv(key); v != "" {
-		return v
-	}
-	return defaultValue
-}
diff --git a/exporters/jaeger/env_test.go b/exporters/jaeger/env_test.go
deleted file mode 100644
index f9219d28b84..00000000000
--- a/exporters/jaeger/env_test.go
+++ /dev/null
@@ -1,242 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
-	"os"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	ottest "go.opentelemetry.io/otel/exporters/jaeger/internal/internaltest"
-)
-
-func TestNewRawExporterWithDefault(t *testing.T) {
-	const (
-		collectorEndpoint = "http://localhost:14268/api/traces"
-		username          = ""
-		password          = ""
-	)
-
-	// Create Jaeger Exporter with default values
-	exp, err := New(
-		WithCollectorEndpoint(),
-	)
-
-	assert.NoError(t, err)
-
-	require.IsType(t, &collectorUploader{}, exp.uploader)
-	uploader := exp.uploader.(*collectorUploader)
-	assert.Equal(t, collectorEndpoint, uploader.endpoint)
-	assert.Equal(t, username, uploader.username)
-	assert.Equal(t, password, uploader.password)
-}
-
-func TestNewRawExporterWithEnv(t *testing.T) {
-	const (
-		collectorEndpoint = "http://localhost"
-		username          = "user"
-		password          = "password"
-	)
-
-	envStore, err := ottest.SetEnvVariables(map[string]string{
-		envEndpoint: collectorEndpoint,
-		envUser:     username,
-		envPassword: password,
-	})
-	require.NoError(t, err)
-	defer func() {
-		require.NoError(t, envStore.Restore())
-	}()
-
-	// Create Jaeger Exporter with environment variables
-	exp, err := New(
-		WithCollectorEndpoint(),
-	)
-
-	assert.NoError(t, err)
-
-	require.IsType(t, &collectorUploader{}, exp.uploader)
-	uploader := exp.uploader.(*collectorUploader)
-	assert.Equal(t, collectorEndpoint, uploader.endpoint)
-	assert.Equal(t, username, uploader.username)
-	assert.Equal(t, password, uploader.password)
-}
-
-func TestNewRawExporterWithPassedOption(t *testing.T) {
-	const (
-		collectorEndpoint = "http://localhost"
-		username          = "user"
-		password          = "password"
-		optionEndpoint    = "should not be overwritten"
-	)
-
-	envStore, err := ottest.SetEnvVariables(map[string]string{
-		envEndpoint: collectorEndpoint,
-		envUser:     username,
-		envPassword: password,
-	})
-	require.NoError(t, err)
-	defer func() {
-		require.NoError(t, envStore.Restore())
-	}()
-
-	// Create Jaeger Exporter with passed endpoint option, should be used over envEndpoint
-	exp, err := New(
-		WithCollectorEndpoint(WithEndpoint(optionEndpoint)),
-	)
-
-	assert.NoError(t, err)
-
-	require.IsType(t, &collectorUploader{}, exp.uploader)
-	uploader := exp.uploader.(*collectorUploader)
-	assert.Equal(t, optionEndpoint, uploader.endpoint)
-	assert.Equal(t, username, uploader.username)
-	assert.Equal(t, password, uploader.password)
-}
-
-func TestEnvOrWithAgentHostPortFromEnv(t *testing.T) {
-	testCases := []struct {
-		name         string
-		envAgentHost string
-		envAgentPort string
-		defaultHost  string
-		defaultPort  string
-		expectedHost string
-		expectedPort string
-	}{
-		{
-			name:         "overrides default host/port values via environment variables",
-			envAgentHost: "localhost",
-			envAgentPort: "6832",
-			defaultHost:  "hostNameToBeReplaced",
-			defaultPort:  "8203",
-			expectedHost: "localhost",
-			expectedPort: "6832",
-		},
-		{
-			name:         "envAgentHost is empty, will not overwrite default host value",
-			envAgentHost: "",
-			envAgentPort: "6832",
-			defaultHost:  "hostNameNotToBeReplaced",
-			defaultPort:  "8203",
-			expectedHost: "hostNameNotToBeReplaced",
-			expectedPort: "6832",
-		},
-		{
-			name:         "envAgentPort is empty, will not overwrite default port value",
-			envAgentHost: "localhost",
-			envAgentPort: "",
-			defaultHost:  "hostNameToBeReplaced",
-			defaultPort:  "8203",
-			expectedHost: "localhost",
-			expectedPort: "8203",
-		},
-		{
-			name:         "envAgentHost and envAgentPort are empty, will not overwrite default host/port values",
-			envAgentHost: "",
-			envAgentPort: "",
-			defaultHost:  "hostNameNotToBeReplaced",
-			defaultPort:  "8203",
-			expectedHost: "hostNameNotToBeReplaced",
-			expectedPort: "8203",
-		},
-	}
-
-	envStore := ottest.NewEnvStore()
-	envStore.Record(envAgentHost)
-	envStore.Record(envAgentPort)
-	defer func() {
-		require.NoError(t, envStore.Restore())
-	}()
-	for _, tc := range testCases {
-		t.Run(tc.name, func(t *testing.T) {
-			require.NoError(t, os.Setenv(envAgentHost, tc.envAgentHost))
-			require.NoError(t, os.Setenv(envAgentPort, tc.envAgentPort))
-			host := envOr(envAgentHost, tc.defaultHost)
-			port := envOr(envAgentPort, tc.defaultPort)
-			assert.Equal(t, tc.expectedHost, host)
-			assert.Equal(t, tc.expectedPort, port)
-		})
-	}
-}
-
-func TestEnvOrWithCollectorEndpointOptionsFromEnv(t *testing.T) {
-	testCases := []struct {
-		name                             string
-		envEndpoint                      string
-		envUsername                      string
-		envPassword                      string
-		defaultCollectorEndpointOptions  collectorEndpointConfig
-		expectedCollectorEndpointOptions collectorEndpointConfig
-	}{
-		{
-			name:        "overrides value via environment variables",
-			envEndpoint: "http://localhost:14252",
-			envUsername: "username",
-			envPassword: "password",
-			defaultCollectorEndpointOptions: collectorEndpointConfig{
-				endpoint: "endpoint not to be used",
-				username: "foo",
-				password: "bar",
-			},
-			expectedCollectorEndpointOptions: collectorEndpointConfig{
-				endpoint: "http://localhost:14252",
-				username: "username",
-				password: "password",
-			},
-		},
-		{
-			name:        "environment variables is empty, will not overwrite value",
-			envEndpoint: "",
-			envUsername: "",
-			envPassword: "",
-			defaultCollectorEndpointOptions: collectorEndpointConfig{
-				endpoint: "endpoint to be used",
-				username: "foo",
-				password: "bar",
-			},
-			expectedCollectorEndpointOptions: collectorEndpointConfig{
-				endpoint: "endpoint to be used",
-				username: "foo",
-				password: "bar",
-			},
-		},
-	}
-
-	envStore := ottest.NewEnvStore()
-	envStore.Record(envEndpoint)
-	envStore.Record(envUser)
-	envStore.Record(envPassword)
-	defer func() {
-		require.NoError(t, envStore.Restore())
-	}()
-	for _, tc := range testCases {
-		t.Run(tc.name, func(t *testing.T) {
-			require.NoError(t, os.Setenv(envEndpoint, tc.envEndpoint))
-			require.NoError(t, os.Setenv(envUser, tc.envUsername))
-			require.NoError(t, os.Setenv(envPassword, tc.envPassword))
-
-			endpoint := envOr(envEndpoint, tc.defaultCollectorEndpointOptions.endpoint)
-			username := envOr(envUser, tc.defaultCollectorEndpointOptions.username)
-			password := envOr(envPassword, tc.defaultCollectorEndpointOptions.password)
-
-			assert.Equal(t, tc.expectedCollectorEndpointOptions.endpoint, endpoint)
-			assert.Equal(t, tc.expectedCollectorEndpointOptions.username, username)
-			assert.Equal(t, tc.expectedCollectorEndpointOptions.password, password)
-		})
-	}
-}
diff --git a/exporters/jaeger/go.mod b/exporters/jaeger/go.mod
deleted file mode 100644
index cef7005a963..00000000000
--- a/exporters/jaeger/go.mod
+++ /dev/null
@@ -1,35 +0,0 @@
-// Deprecated: This module is no longer supported.
-// OpenTelemetry dropped support for Jaeger exporter in July 2023.
-// Jaeger officially accepts and recommends using OTLP.
-// Use [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp]
-// or [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc] instead.
-module go.opentelemetry.io/otel/exporters/jaeger
-
-go 1.19
-
-require (
-	github.com/go-logr/logr v1.2.4
-	github.com/go-logr/stdr v1.2.2
-	github.com/google/go-cmp v0.5.9
-	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/trace v1.16.0
-)
-
-require (
-	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/stretchr/objx v0.5.0 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
-)
-
-replace go.opentelemetry.io/otel/trace => ../../trace
-
-replace go.opentelemetry.io/otel => ../..
-
-replace go.opentelemetry.io/otel/sdk => ../../sdk
-
-replace go.opentelemetry.io/otel/metric => ../../metric
diff --git a/exporters/jaeger/go.sum b/exporters/jaeger/go.sum
deleted file mode 100644
index e874ee50e97..00000000000
--- a/exporters/jaeger/go.sum
+++ /dev/null
@@ -1,27 +0,0 @@
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
-github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
-gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go b/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go
deleted file mode 100644
index 54cd3b0867a..00000000000
--- a/exporters/jaeger/internal/gen-go/agent/GoUnusedProtection__.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package agent
-
-var GoUnusedProtection__ int;
-
diff --git a/exporters/jaeger/internal/gen-go/agent/agent-consts.go b/exporters/jaeger/internal/gen-go/agent/agent-consts.go
deleted file mode 100644
index 3b96e3222ee..00000000000
--- a/exporters/jaeger/internal/gen-go/agent/agent-consts.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package agent
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"time"
-
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore"
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-var _ = jaeger.GoUnusedProtection__
-var _ = zipkincore.GoUnusedProtection__
-
-func init() {
-}
diff --git a/exporters/jaeger/internal/gen-go/agent/agent-remote/agent-remote.go b/exporters/jaeger/internal/gen-go/agent/agent-remote/agent-remote.go
deleted file mode 100755
index 9ec0d40a865..00000000000
--- a/exporters/jaeger/internal/gen-go/agent/agent-remote/agent-remote.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package main
-
-import (
-	"context"
-	"flag"
-	"fmt"
-	"math"
-	"net"
-	"net/url"
-	"os"
-	"strconv"
-	"strings"
-
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/agent"
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore"
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-var _ = jaeger.GoUnusedProtection__
-var _ = zipkincore.GoUnusedProtection__
-var _ = agent.GoUnusedProtection__
-
-func Usage() {
-	fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:")
-	flag.PrintDefaults()
-	fmt.Fprintln(os.Stderr, "\nFunctions:")
-	fmt.Fprintln(os.Stderr, "  void emitZipkinBatch( spans)")
-	fmt.Fprintln(os.Stderr, "  void emitBatch(Batch batch)")
-	fmt.Fprintln(os.Stderr)
-	os.Exit(0)
-}
-
-type httpHeaders map[string]string
-
-func (h httpHeaders) String() string {
-	var m map[string]string = h
-	return fmt.Sprintf("%s", m)
-}
-
-func (h httpHeaders) Set(value string) error {
-	parts := strings.Split(value, ": ")
-	if len(parts) != 2 {
-		return fmt.Errorf("header should be of format 'Key: Value'")
-	}
-	h[parts[0]] = parts[1]
-	return nil
-}
-
-func main() {
-	flag.Usage = Usage
-	var host string
-	var port int
-	var protocol string
-	var urlString string
-	var framed bool
-	var useHttp bool
-	headers := make(httpHeaders)
-	var parsedUrl *url.URL
-	var trans thrift.TTransport
-	_ = strconv.Atoi
-	_ = math.Abs
-	flag.Usage = Usage
-	flag.StringVar(&host, "h", "localhost", "Specify host and port")
-	flag.IntVar(&port, "p", 9090, "Specify port")
-	flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)")
-	flag.StringVar(&urlString, "u", "", "Specify the url")
-	flag.BoolVar(&framed, "framed", false, "Use framed transport")
-	flag.BoolVar(&useHttp, "http", false, "Use http")
-	flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. -H \"Key: Value\")")
-	flag.Parse()
-
-	if len(urlString) > 0 {
-		var err error
-		parsedUrl, err = url.Parse(urlString)
-		if err != nil {
-			fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
-			flag.Usage()
-		}
-		host = parsedUrl.Host
-		useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https"
-	} else if useHttp {
-		_, err := url.Parse(fmt.Sprint("http://", host, ":", port))
-		if err != nil {
-			fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
-			flag.Usage()
-		}
-	}
-
-	cmd := flag.Arg(0)
-	var err error
-	if useHttp {
-		trans, err = thrift.NewTHttpClient(parsedUrl.String())
-		if len(headers) > 0 {
-			httptrans := trans.(*thrift.THttpClient)
-			for key, value := range headers {
-				httptrans.SetHeader(key, value)
-			}
-		}
-	} else {
-		portStr := fmt.Sprint(port)
-		if strings.Contains(host, ":") {
-			host, portStr, err = net.SplitHostPort(host)
-			if err != nil {
-				fmt.Fprintln(os.Stderr, "error with host:", err)
-				os.Exit(1)
-			}
-		}
-		trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr))
-		if err != nil {
-			fmt.Fprintln(os.Stderr, "error resolving address:", err)
-			os.Exit(1)
-		}
-		if framed {
-			trans = thrift.NewTFramedTransport(trans)
-		}
-	}
-	if err != nil {
-		fmt.Fprintln(os.Stderr, "Error creating transport", err)
-		os.Exit(1)
-	}
-	defer trans.Close()
-	var protocolFactory thrift.TProtocolFactory
-	switch protocol {
-	case "compact":
-		protocolFactory = thrift.NewTCompactProtocolFactory()
-		break
-	case "simplejson":
-		protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
-		break
-	case "json":
-		protocolFactory = thrift.NewTJSONProtocolFactory()
-		break
-	case "binary", "":
-		protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
-		break
-	default:
-		fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol)
-		Usage()
-		os.Exit(1)
-	}
-	iprot := protocolFactory.GetProtocol(trans)
-	oprot := protocolFactory.GetProtocol(trans)
-	client := agent.NewAgentClient(thrift.NewTStandardClient(iprot, oprot))
-	if err := trans.Open(); err != nil {
-		fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err)
-		os.Exit(1)
-	}
-
-	switch cmd {
-	case "emitZipkinBatch":
-		if flag.NArg()-1 != 1 {
-			fmt.Fprintln(os.Stderr, "EmitZipkinBatch requires 1 args")
-			flag.Usage()
-		}
-		arg5 := flag.Arg(1)
-		mbTrans6 := thrift.NewTMemoryBufferLen(len(arg5))
-		defer mbTrans6.Close()
-		_, err7 := mbTrans6.WriteString(arg5)
-		if err7 != nil {
-			Usage()
-			return
-		}
-		factory8 := thrift.NewTJSONProtocolFactory()
-		jsProt9 := factory8.GetProtocol(mbTrans6)
-		containerStruct0 := agent.NewAgentEmitZipkinBatchArgs()
-		err10 := containerStruct0.ReadField1(context.Background(), jsProt9)
-		if err10 != nil {
-			Usage()
-			return
-		}
-		argvalue0 := containerStruct0.Spans
-		value0 := argvalue0
-		fmt.Print(client.EmitZipkinBatch(context.Background(), value0))
-		fmt.Print("\n")
-		break
-	case "emitBatch":
-		if flag.NArg()-1 != 1 {
-			fmt.Fprintln(os.Stderr, "EmitBatch requires 1 args")
-			flag.Usage()
-		}
-		arg11 := flag.Arg(1)
-		mbTrans12 := thrift.NewTMemoryBufferLen(len(arg11))
-		defer mbTrans12.Close()
-		_, err13 := mbTrans12.WriteString(arg11)
-		if err13 != nil {
-			Usage()
-			return
-		}
-		factory14 := thrift.NewTJSONProtocolFactory()
-		jsProt15 := factory14.GetProtocol(mbTrans12)
-		argvalue0 := jaeger.NewBatch()
-		err16 := argvalue0.Read(context.Background(), jsProt15)
-		if err16 != nil {
-			Usage()
-			return
-		}
-		value0 := argvalue0
-		fmt.Print(client.EmitBatch(context.Background(), value0))
-		fmt.Print("\n")
-		break
-	case "":
-		Usage()
-		break
-	default:
-		fmt.Fprintln(os.Stderr, "Invalid function ", cmd)
-	}
-}
diff --git a/exporters/jaeger/internal/gen-go/agent/agent.go b/exporters/jaeger/internal/gen-go/agent/agent.go
deleted file mode 100644
index c7c8e9ca3e6..00000000000
--- a/exporters/jaeger/internal/gen-go/agent/agent.go
+++ /dev/null
@@ -1,412 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package agent
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"time"
-
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore"
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-var _ = jaeger.GoUnusedProtection__
-var _ = zipkincore.GoUnusedProtection__
-
-type Agent interface {
-	// Parameters:
-	//  - Spans
-	EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error)
-	// Parameters:
-	//  - Batch
-	EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error)
-}
-
-type AgentClient struct {
-	c    thrift.TClient
-	meta thrift.ResponseMeta
-}
-
-func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
-	return &AgentClient{
-		c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
-	}
-}
-
-func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
-	return &AgentClient{
-		c: thrift.NewTStandardClient(iprot, oprot),
-	}
-}
-
-func NewAgentClient(c thrift.TClient) *AgentClient {
-	return &AgentClient{
-		c: c,
-	}
-}
-
-func (p *AgentClient) Client_() thrift.TClient {
-	return p.c
-}
-
-func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta {
-	return p.meta
-}
-
-func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
-	p.meta = meta
-}
-
-// Parameters:
-//  - Spans
-func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) {
-	var _args0 AgentEmitZipkinBatchArgs
-	_args0.Spans = spans
-	p.SetLastResponseMeta_(thrift.ResponseMeta{})
-	if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Parameters:
-//  - Batch
-func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) {
-	var _args1 AgentEmitBatchArgs
-	_args1.Batch = batch
-	p.SetLastResponseMeta_(thrift.ResponseMeta{})
-	if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil {
-		return err
-	}
-	return nil
-}
-
-type AgentProcessor struct {
-	processorMap map[string]thrift.TProcessorFunction
-	handler      Agent
-}
-
-func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
-	p.processorMap[key] = processor
-}
-
-func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
-	processor, ok = p.processorMap[key]
-	return processor, ok
-}
-
-func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
-	return p.processorMap
-}
-
-func NewAgentProcessor(handler Agent) *AgentProcessor {
-
-	self2 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
-	self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler}
-	self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
-	return self2
-}
-
-func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
-	if err2 != nil {
-		return false, thrift.WrapTException(err2)
-	}
-	if processor, ok := p.GetProcessorFunction(name); ok {
-		return processor.Process(ctx, seqId, iprot, oprot)
-	}
-	iprot.Skip(ctx, thrift.STRUCT)
-	iprot.ReadMessageEnd(ctx)
-	x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
-	oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
-	x3.Write(ctx, oprot)
-	oprot.WriteMessageEnd(ctx)
-	oprot.Flush(ctx)
-	return false, x3
-
-}
-
-type agentProcessorEmitZipkinBatch struct {
-	handler Agent
-}
-
-func (p *agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	args := AgentEmitZipkinBatchArgs{}
-	var err2 error
-	if err2 = args.Read(ctx, iprot); err2 != nil {
-		iprot.ReadMessageEnd(ctx)
-		return false, thrift.WrapTException(err2)
-	}
-	iprot.ReadMessageEnd(ctx)
-
-	tickerCancel := func() {}
-	_ = tickerCancel
-
-	if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil {
-		tickerCancel()
-		return true, thrift.WrapTException(err2)
-	}
-	tickerCancel()
-	return true, nil
-}
-
-type agentProcessorEmitBatch struct {
-	handler Agent
-}
-
-func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	args := AgentEmitBatchArgs{}
-	var err2 error
-	if err2 = args.Read(ctx, iprot); err2 != nil {
-		iprot.ReadMessageEnd(ctx)
-		return false, thrift.WrapTException(err2)
-	}
-	iprot.ReadMessageEnd(ctx)
-
-	tickerCancel := func() {}
-	_ = tickerCancel
-
-	if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil {
-		tickerCancel()
-		return true, thrift.WrapTException(err2)
-	}
-	tickerCancel()
-	return true, nil
-}
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-//  - Spans
-type AgentEmitZipkinBatchArgs struct {
-	Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"`
-}
-
-func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs {
-	return &AgentEmitZipkinBatchArgs{}
-}
-
-func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span {
-	return p.Spans
-}
-func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.LIST {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *AgentEmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin(ctx)
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*zipkincore.Span, 0, size)
-	p.Spans = tSlice
-	for i := 0; i < size; i++ {
-		_elem4 := &zipkincore.Span{}
-		if err := _elem4.Read(ctx, iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
-		}
-		p.Spans = append(p.Spans, _elem4)
-	}
-	if err := iprot.ReadListEnd(ctx); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
-	}
-	if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
-		return thrift.PrependError("error writing list begin: ", err)
-	}
-	for _, v := range p.Spans {
-		if err := v.Write(ctx, oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-		}
-	}
-	if err := oprot.WriteListEnd(ctx); err != nil {
-		return thrift.PrependError("error writing list end: ", err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
-	}
-	return err
-}
-
-func (p *AgentEmitZipkinBatchArgs) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
-}
-
-// Attributes:
-//  - Batch
-type AgentEmitBatchArgs struct {
-	Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"`
-}
-
-func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
-	return &AgentEmitBatchArgs{}
-}
-
-var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch
-
-func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch {
-	if !p.IsSetBatch() {
-		return AgentEmitBatchArgs_Batch_DEFAULT
-	}
-	return p.Batch
-}
-func (p *AgentEmitBatchArgs) IsSetBatch() bool {
-	return p.Batch != nil
-}
-
-func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.STRUCT {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *AgentEmitBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	p.Batch = &jaeger.Batch{}
-	if err := p.Batch.Read(ctx, iprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
-	}
-	return nil
-}
-
-func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err)
-	}
-	if err := p.Batch.Write(ctx, oprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err)
-	}
-	return err
-}
-
-func (p *AgentEmitBatchArgs) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
-}
diff --git a/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go b/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go
deleted file mode 100644
index fe45a9f9ad2..00000000000
--- a/exporters/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package jaeger
-
-var GoUnusedProtection__ int;
-
diff --git a/exporters/jaeger/internal/gen-go/jaeger/collector-remote/collector-remote.go b/exporters/jaeger/internal/gen-go/jaeger/collector-remote/collector-remote.go
deleted file mode 100755
index cacf75d00f5..00000000000
--- a/exporters/jaeger/internal/gen-go/jaeger/collector-remote/collector-remote.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package main
-
-import (
-	"context"
-	"flag"
-	"fmt"
-	"math"
-	"net"
-	"net/url"
-	"os"
-	"strconv"
-	"strings"
-
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-var _ = jaeger.GoUnusedProtection__
-
-func Usage() {
-	fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:")
-	flag.PrintDefaults()
-	fmt.Fprintln(os.Stderr, "\nFunctions:")
-	fmt.Fprintln(os.Stderr, "   submitBatches( batches)")
-	fmt.Fprintln(os.Stderr)
-	os.Exit(0)
-}
-
-type httpHeaders map[string]string
-
-func (h httpHeaders) String() string {
-	var m map[string]string = h
-	return fmt.Sprintf("%s", m)
-}
-
-func (h httpHeaders) Set(value string) error {
-	parts := strings.Split(value, ": ")
-	if len(parts) != 2 {
-		return fmt.Errorf("header should be of format 'Key: Value'")
-	}
-	h[parts[0]] = parts[1]
-	return nil
-}
-
-func main() {
-	flag.Usage = Usage
-	var host string
-	var port int
-	var protocol string
-	var urlString string
-	var framed bool
-	var useHttp bool
-	headers := make(httpHeaders)
-	var parsedUrl *url.URL
-	var trans thrift.TTransport
-	_ = strconv.Atoi
-	_ = math.Abs
-	flag.Usage = Usage
-	flag.StringVar(&host, "h", "localhost", "Specify host and port")
-	flag.IntVar(&port, "p", 9090, "Specify port")
-	flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)")
-	flag.StringVar(&urlString, "u", "", "Specify the url")
-	flag.BoolVar(&framed, "framed", false, "Use framed transport")
-	flag.BoolVar(&useHttp, "http", false, "Use http")
-	flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. -H \"Key: Value\")")
-	flag.Parse()
-
-	if len(urlString) > 0 {
-		var err error
-		parsedUrl, err = url.Parse(urlString)
-		if err != nil {
-			fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
-			flag.Usage()
-		}
-		host = parsedUrl.Host
-		useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https"
-	} else if useHttp {
-		_, err := url.Parse(fmt.Sprint("http://", host, ":", port))
-		if err != nil {
-			fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
-			flag.Usage()
-		}
-	}
-
-	cmd := flag.Arg(0)
-	var err error
-	if useHttp {
-		trans, err = thrift.NewTHttpClient(parsedUrl.String())
-		if len(headers) > 0 {
-			httptrans := trans.(*thrift.THttpClient)
-			for key, value := range headers {
-				httptrans.SetHeader(key, value)
-			}
-		}
-	} else {
-		portStr := fmt.Sprint(port)
-		if strings.Contains(host, ":") {
-			host, portStr, err = net.SplitHostPort(host)
-			if err != nil {
-				fmt.Fprintln(os.Stderr, "error with host:", err)
-				os.Exit(1)
-			}
-		}
-		trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr))
-		if err != nil {
-			fmt.Fprintln(os.Stderr, "error resolving address:", err)
-			os.Exit(1)
-		}
-		if framed {
-			trans = thrift.NewTFramedTransport(trans)
-		}
-	}
-	if err != nil {
-		fmt.Fprintln(os.Stderr, "Error creating transport", err)
-		os.Exit(1)
-	}
-	defer trans.Close()
-	var protocolFactory thrift.TProtocolFactory
-	switch protocol {
-	case "compact":
-		protocolFactory = thrift.NewTCompactProtocolFactory()
-		break
-	case "simplejson":
-		protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
-		break
-	case "json":
-		protocolFactory = thrift.NewTJSONProtocolFactory()
-		break
-	case "binary", "":
-		protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
-		break
-	default:
-		fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol)
-		Usage()
-		os.Exit(1)
-	}
-	iprot := protocolFactory.GetProtocol(trans)
-	oprot := protocolFactory.GetProtocol(trans)
-	client := jaeger.NewCollectorClient(thrift.NewTStandardClient(iprot, oprot))
-	if err := trans.Open(); err != nil {
-		fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err)
-		os.Exit(1)
-	}
-
-	switch cmd {
-	case "submitBatches":
-		if flag.NArg()-1 != 1 {
-			fmt.Fprintln(os.Stderr, "SubmitBatches requires 1 args")
-			flag.Usage()
-		}
-		arg19 := flag.Arg(1)
-		mbTrans20 := thrift.NewTMemoryBufferLen(len(arg19))
-		defer mbTrans20.Close()
-		_, err21 := mbTrans20.WriteString(arg19)
-		if err21 != nil {
-			Usage()
-			return
-		}
-		factory22 := thrift.NewTJSONProtocolFactory()
-		jsProt23 := factory22.GetProtocol(mbTrans20)
-		containerStruct0 := jaeger.NewCollectorSubmitBatchesArgs()
-		err24 := containerStruct0.ReadField1(context.Background(), jsProt23)
-		if err24 != nil {
-			Usage()
-			return
-		}
-		argvalue0 := containerStruct0.Batches
-		value0 := argvalue0
-		fmt.Print(client.SubmitBatches(context.Background(), value0))
-		fmt.Print("\n")
-		break
-	case "":
-		Usage()
-		break
-	default:
-		fmt.Fprintln(os.Stderr, "Invalid function ", cmd)
-	}
-}
diff --git a/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go b/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go
deleted file mode 100644
index 10162857fbb..00000000000
--- a/exporters/jaeger/internal/gen-go/jaeger/jaeger-consts.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package jaeger
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"time"
-
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-func init() {
-}
diff --git a/exporters/jaeger/internal/gen-go/jaeger/jaeger.go b/exporters/jaeger/internal/gen-go/jaeger/jaeger.go
deleted file mode 100644
index b1fe26c57d9..00000000000
--- a/exporters/jaeger/internal/gen-go/jaeger/jaeger.go
+++ /dev/null
@@ -1,3022 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package jaeger
-
-import (
-	"bytes"
-	"context"
-	"database/sql/driver"
-	"errors"
-	"fmt"
-	"time"
-
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-type TagType int64
-
-const (
-	TagType_STRING TagType = 0
-	TagType_DOUBLE TagType = 1
-	TagType_BOOL   TagType = 2
-	TagType_LONG   TagType = 3
-	TagType_BINARY TagType = 4
-)
-
-func (p TagType) String() string {
-	switch p {
-	case TagType_STRING:
-		return "STRING"
-	case TagType_DOUBLE:
-		return "DOUBLE"
-	case TagType_BOOL:
-		return "BOOL"
-	case TagType_LONG:
-		return "LONG"
-	case TagType_BINARY:
-		return "BINARY"
-	}
-	return "<UNSET>"
-}
-
-func TagTypeFromString(s string) (TagType, error) {
-	switch s {
-	case "STRING":
-		return TagType_STRING, nil
-	case "DOUBLE":
-		return TagType_DOUBLE, nil
-	case "BOOL":
-		return TagType_BOOL, nil
-	case "LONG":
-		return TagType_LONG, nil
-	case "BINARY":
-		return TagType_BINARY, nil
-	}
-	return TagType(0), fmt.Errorf("not a valid TagType string")
-}
-
-func TagTypePtr(v TagType) *TagType { return &v }
-
-func (p TagType) MarshalText() ([]byte, error) {
-	return []byte(p.String()), nil
-}
-
-func (p *TagType) UnmarshalText(text []byte) error {
-	q, err := TagTypeFromString(string(text))
-	if err != nil {
-		return err
-	}
-	*p = q
-	return nil
-}
-
-func (p *TagType) Scan(value interface{}) error {
-	v, ok := value.(int64)
-	if !ok {
-		return errors.New("Scan value is not int64")
-	}
-	*p = TagType(v)
-	return nil
-}
-
-func (p *TagType) Value() (driver.Value, error) {
-	if p == nil {
-		return nil, nil
-	}
-	return int64(*p), nil
-}
-
-type SpanRefType int64
-
-const (
-	SpanRefType_CHILD_OF     SpanRefType = 0
-	SpanRefType_FOLLOWS_FROM SpanRefType = 1
-)
-
-func (p SpanRefType) String() string {
-	switch p {
-	case SpanRefType_CHILD_OF:
-		return "CHILD_OF"
-	case SpanRefType_FOLLOWS_FROM:
-		return "FOLLOWS_FROM"
-	}
-	return "<UNSET>"
-}
-
-func SpanRefTypeFromString(s string) (SpanRefType, error) {
-	switch s {
-	case "CHILD_OF":
-		return SpanRefType_CHILD_OF, nil
-	case "FOLLOWS_FROM":
-		return SpanRefType_FOLLOWS_FROM, nil
-	}
-	return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string")
-}
-
-func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v }
-
-func (p SpanRefType) MarshalText() ([]byte, error) {
-	return []byte(p.String()), nil
-}
-
-func (p *SpanRefType) UnmarshalText(text []byte) error {
-	q, err := SpanRefTypeFromString(string(text))
-	if err != nil {
-		return err
-	}
-	*p = q
-	return nil
-}
-
-func (p *SpanRefType) Scan(value interface{}) error {
-	v, ok := value.(int64)
-	if !ok {
-		return errors.New("Scan value is not int64")
-	}
-	*p = SpanRefType(v)
-	return nil
-}
-
-func (p *SpanRefType) Value() (driver.Value, error) {
-	if p == nil {
-		return nil, nil
-	}
-	return int64(*p), nil
-}
-
-// Attributes:
-//  - Key
-//  - VType
-//  - VStr
-//  - VDouble
-//  - VBool
-//  - VLong
-//  - VBinary
-type Tag struct {
-	Key     string   `thrift:"key,1,required" db:"key" json:"key"`
-	VType   TagType  `thrift:"vType,2,required" db:"vType" json:"vType"`
-	VStr    *string  `thrift:"vStr,3" db:"vStr" json:"vStr,omitempty"`
-	VDouble *float64 `thrift:"vDouble,4" db:"vDouble" json:"vDouble,omitempty"`
-	VBool   *bool    `thrift:"vBool,5" db:"vBool" json:"vBool,omitempty"`
-	VLong   *int64   `thrift:"vLong,6" db:"vLong" json:"vLong,omitempty"`
-	VBinary []byte   `thrift:"vBinary,7" db:"vBinary" json:"vBinary,omitempty"`
-}
-
-func NewTag() *Tag {
-	return &Tag{}
-}
-
-func (p *Tag) GetKey() string {
-	return p.Key
-}
-
-func (p *Tag) GetVType() TagType {
-	return p.VType
-}
-
-var Tag_VStr_DEFAULT string
-
-func (p *Tag) GetVStr() string {
-	if !p.IsSetVStr() {
-		return Tag_VStr_DEFAULT
-	}
-	return *p.VStr
-}
-
-var Tag_VDouble_DEFAULT float64
-
-func (p *Tag) GetVDouble() float64 {
-	if !p.IsSetVDouble() {
-		return Tag_VDouble_DEFAULT
-	}
-	return *p.VDouble
-}
-
-var Tag_VBool_DEFAULT bool
-
-func (p *Tag) GetVBool() bool {
-	if !p.IsSetVBool() {
-		return Tag_VBool_DEFAULT
-	}
-	return *p.VBool
-}
-
-var Tag_VLong_DEFAULT int64
-
-func (p *Tag) GetVLong() int64 {
-	if !p.IsSetVLong() {
-		return Tag_VLong_DEFAULT
-	}
-	return *p.VLong
-}
-
-var Tag_VBinary_DEFAULT []byte
-
-func (p *Tag) GetVBinary() []byte {
-	return p.VBinary
-}
-func (p *Tag) IsSetVStr() bool {
-	return p.VStr != nil
-}
-
-func (p *Tag) IsSetVDouble() bool {
-	return p.VDouble != nil
-}
-
-func (p *Tag) IsSetVBool() bool {
-	return p.VBool != nil
-}
-
-func (p *Tag) IsSetVLong() bool {
-	return p.VLong != nil
-}
-
-func (p *Tag) IsSetVBinary() bool {
-	return p.VBinary != nil
-}
-
-func (p *Tag) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetKey bool = false
-	var issetVType bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.STRING {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-				issetKey = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 2:
-			if fieldTypeId == thrift.I32 {
-				if err := p.ReadField2(ctx, iprot); err != nil {
-					return err
-				}
-				issetVType = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 3:
-			if fieldTypeId == thrift.STRING {
-				if err := p.ReadField3(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 4:
-			if fieldTypeId == thrift.DOUBLE {
-				if err := p.ReadField4(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 5:
-			if fieldTypeId == thrift.BOOL {
-				if err := p.ReadField5(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 6:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField6(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 7:
-			if fieldTypeId == thrift.STRING {
-				if err := p.ReadField7(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetKey {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set"))
-	}
-	if !issetVType {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set"))
-	}
-	return nil
-}
-
-func (p *Tag) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(ctx); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.Key = v
-	}
-	return nil
-}
-
-func (p *Tag) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI32(ctx); err != nil {
-		return thrift.PrependError("error reading field 2: ", err)
-	} else {
-		temp := TagType(v)
-		p.VType = temp
-	}
-	return nil
-}
-
-func (p *Tag) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(ctx); err != nil {
-		return thrift.PrependError("error reading field 3: ", err)
-	} else {
-		p.VStr = &v
-	}
-	return nil
-}
-
-func (p *Tag) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadDouble(ctx); err != nil {
-		return thrift.PrependError("error reading field 4: ", err)
-	} else {
-		p.VDouble = &v
-	}
-	return nil
-}
-
-func (p *Tag) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadBool(ctx); err != nil {
-		return thrift.PrependError("error reading field 5: ", err)
-	} else {
-		p.VBool = &v
-	}
-	return nil
-}
-
-func (p *Tag) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 6: ", err)
-	} else {
-		p.VLong = &v
-	}
-	return nil
-}
-
-func (p *Tag) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadBinary(ctx); err != nil {
-		return thrift.PrependError("error reading field 7: ", err)
-	} else {
-		p.VBinary = v
-	}
-	return nil
-}
-
-func (p *Tag) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "Tag"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField2(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField3(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField4(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField5(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField6(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField7(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Tag) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
-	}
-	if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
-	}
-	return err
-}
-
-func (p *Tag) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "vType", thrift.I32, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err)
-	}
-	if err := oprot.WriteI32(ctx, int32(p.VType)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err)
-	}
-	return err
-}
-
-func (p *Tag) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetVStr() {
-		if err := oprot.WriteFieldBegin(ctx, "vStr", thrift.STRING, 3); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err)
-		}
-		if err := oprot.WriteString(ctx, string(*p.VStr)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Tag) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetVDouble() {
-		if err := oprot.WriteFieldBegin(ctx, "vDouble", thrift.DOUBLE, 4); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err)
-		}
-		if err := oprot.WriteDouble(ctx, float64(*p.VDouble)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Tag) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetVBool() {
-		if err := oprot.WriteFieldBegin(ctx, "vBool", thrift.BOOL, 5); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err)
-		}
-		if err := oprot.WriteBool(ctx, bool(*p.VBool)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Tag) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetVLong() {
-		if err := oprot.WriteFieldBegin(ctx, "vLong", thrift.I64, 6); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err)
-		}
-		if err := oprot.WriteI64(ctx, int64(*p.VLong)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Tag) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetVBinary() {
-		if err := oprot.WriteFieldBegin(ctx, "vBinary", thrift.STRING, 7); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err)
-		}
-		if err := oprot.WriteBinary(ctx, p.VBinary); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Tag) Equals(other *Tag) bool {
-	if p == other {
-		return true
-	} else if p == nil || other == nil {
-		return false
-	}
-	if p.Key != other.Key {
-		return false
-	}
-	if p.VType != other.VType {
-		return false
-	}
-	if p.VStr != other.VStr {
-		if p.VStr == nil || other.VStr == nil {
-			return false
-		}
-		if (*p.VStr) != (*other.VStr) {
-			return false
-		}
-	}
-	if p.VDouble != other.VDouble {
-		if p.VDouble == nil || other.VDouble == nil {
-			return false
-		}
-		if (*p.VDouble) != (*other.VDouble) {
-			return false
-		}
-	}
-	if p.VBool != other.VBool {
-		if p.VBool == nil || other.VBool == nil {
-			return false
-		}
-		if (*p.VBool) != (*other.VBool) {
-			return false
-		}
-	}
-	if p.VLong != other.VLong {
-		if p.VLong == nil || other.VLong == nil {
-			return false
-		}
-		if (*p.VLong) != (*other.VLong) {
-			return false
-		}
-	}
-	if bytes.Compare(p.VBinary, other.VBinary) != 0 {
-		return false
-	}
-	return true
-}
-
-func (p *Tag) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Tag(%+v)", *p)
-}
-
-// Attributes:
-//  - Timestamp
-//  - Fields
-type Log struct {
-	Timestamp int64  `thrift:"timestamp,1,required" db:"timestamp" json:"timestamp"`
-	Fields    []*Tag `thrift:"fields,2,required" db:"fields" json:"fields"`
-}
-
-func NewLog() *Log {
-	return &Log{}
-}
-
-func (p *Log) GetTimestamp() int64 {
-	return p.Timestamp
-}
-
-func (p *Log) GetFields() []*Tag {
-	return p.Fields
-}
-func (p *Log) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetTimestamp bool = false
-	var issetFields bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-				issetTimestamp = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 2:
-			if fieldTypeId == thrift.LIST {
-				if err := p.ReadField2(ctx, iprot); err != nil {
-					return err
-				}
-				issetFields = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetTimestamp {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set"))
-	}
-	if !issetFields {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set"))
-	}
-	return nil
-}
-
-func (p *Log) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.Timestamp = v
-	}
-	return nil
-}
-
-func (p *Log) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin(ctx)
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Tag, 0, size)
-	p.Fields = tSlice
-	for i := 0; i < size; i++ {
-		_elem0 := &Tag{}
-		if err := _elem0.Read(ctx, iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
-		}
-		p.Fields = append(p.Fields, _elem0)
-	}
-	if err := iprot.ReadListEnd(ctx); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *Log) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "Log"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField2(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Log) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
-	}
-	if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
-	}
-	return err
-}
-
-func (p *Log) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "fields", thrift.LIST, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err)
-	}
-	if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Fields)); err != nil {
-		return thrift.PrependError("error writing list begin: ", err)
-	}
-	for _, v := range p.Fields {
-		if err := v.Write(ctx, oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-		}
-	}
-	if err := oprot.WriteListEnd(ctx); err != nil {
-		return thrift.PrependError("error writing list end: ", err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err)
-	}
-	return err
-}
-
-func (p *Log) Equals(other *Log) bool {
-	if p == other {
-		return true
-	} else if p == nil || other == nil {
-		return false
-	}
-	if p.Timestamp != other.Timestamp {
-		return false
-	}
-	if len(p.Fields) != len(other.Fields) {
-		return false
-	}
-	for i, _tgt := range p.Fields {
-		_src1 := other.Fields[i]
-		if !_tgt.Equals(_src1) {
-			return false
-		}
-	}
-	return true
-}
-
-func (p *Log) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Log(%+v)", *p)
-}
-
-// Attributes:
-//  - RefType
-//  - TraceIdLow
-//  - TraceIdHigh
-//  - SpanId
-type SpanRef struct {
-	RefType     SpanRefType `thrift:"refType,1,required" db:"refType" json:"refType"`
-	TraceIdLow  int64       `thrift:"traceIdLow,2,required" db:"traceIdLow" json:"traceIdLow"`
-	TraceIdHigh int64       `thrift:"traceIdHigh,3,required" db:"traceIdHigh" json:"traceIdHigh"`
-	SpanId      int64       `thrift:"spanId,4,required" db:"spanId" json:"spanId"`
-}
-
-func NewSpanRef() *SpanRef {
-	return &SpanRef{}
-}
-
-func (p *SpanRef) GetRefType() SpanRefType {
-	return p.RefType
-}
-
-func (p *SpanRef) GetTraceIdLow() int64 {
-	return p.TraceIdLow
-}
-
-func (p *SpanRef) GetTraceIdHigh() int64 {
-	return p.TraceIdHigh
-}
-
-func (p *SpanRef) GetSpanId() int64 {
-	return p.SpanId
-}
-func (p *SpanRef) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetRefType bool = false
-	var issetTraceIdLow bool = false
-	var issetTraceIdHigh bool = false
-	var issetSpanId bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.I32 {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-				issetRefType = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 2:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField2(ctx, iprot); err != nil {
-					return err
-				}
-				issetTraceIdLow = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 3:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField3(ctx, iprot); err != nil {
-					return err
-				}
-				issetTraceIdHigh = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 4:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField4(ctx, iprot); err != nil {
-					return err
-				}
-				issetSpanId = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetRefType {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set"))
-	}
-	if !issetTraceIdLow {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
-	}
-	if !issetTraceIdHigh {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
-	}
-	if !issetSpanId {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
-	}
-	return nil
-}
-
-func (p *SpanRef) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI32(ctx); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		temp := SpanRefType(v)
-		p.RefType = temp
-	}
-	return nil
-}
-
-func (p *SpanRef) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 2: ", err)
-	} else {
-		p.TraceIdLow = v
-	}
-	return nil
-}
-
-func (p *SpanRef) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 3: ", err)
-	} else {
-		p.TraceIdHigh = v
-	}
-	return nil
-}
-
-func (p *SpanRef) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 4: ", err)
-	} else {
-		p.SpanId = v
-	}
-	return nil
-}
-
-func (p *SpanRef) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "SpanRef"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField2(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField3(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField4(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *SpanRef) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "refType", thrift.I32, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err)
-	}
-	if err := oprot.WriteI32(ctx, int32(p.RefType)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err)
-	}
-	return err
-}
-
-func (p *SpanRef) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err)
-	}
-	if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err)
-	}
-	return err
-}
-
-func (p *SpanRef) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 3); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err)
-	}
-	if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err)
-	}
-	return err
-}
-
-func (p *SpanRef) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 4); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err)
-	}
-	if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err)
-	}
-	return err
-}
-
-func (p *SpanRef) Equals(other *SpanRef) bool {
-	if p == other {
-		return true
-	} else if p == nil || other == nil {
-		return false
-	}
-	if p.RefType != other.RefType {
-		return false
-	}
-	if p.TraceIdLow != other.TraceIdLow {
-		return false
-	}
-	if p.TraceIdHigh != other.TraceIdHigh {
-		return false
-	}
-	if p.SpanId != other.SpanId {
-		return false
-	}
-	return true
-}
-
-func (p *SpanRef) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("SpanRef(%+v)", *p)
-}
-
-// Attributes:
-//  - TraceIdLow
-//  - TraceIdHigh
-//  - SpanId
-//  - ParentSpanId
-//  - OperationName
-//  - References
-//  - Flags
-//  - StartTime
-//  - Duration
-//  - Tags
-//  - Logs
-type Span struct {
-	TraceIdLow    int64      `thrift:"traceIdLow,1,required" db:"traceIdLow" json:"traceIdLow"`
-	TraceIdHigh   int64      `thrift:"traceIdHigh,2,required" db:"traceIdHigh" json:"traceIdHigh"`
-	SpanId        int64      `thrift:"spanId,3,required" db:"spanId" json:"spanId"`
-	ParentSpanId  int64      `thrift:"parentSpanId,4,required" db:"parentSpanId" json:"parentSpanId"`
-	OperationName string     `thrift:"operationName,5,required" db:"operationName" json:"operationName"`
-	References    []*SpanRef `thrift:"references,6" db:"references" json:"references,omitempty"`
-	Flags         int32      `thrift:"flags,7,required" db:"flags" json:"flags"`
-	StartTime     int64      `thrift:"startTime,8,required" db:"startTime" json:"startTime"`
-	Duration      int64      `thrift:"duration,9,required" db:"duration" json:"duration"`
-	Tags          []*Tag     `thrift:"tags,10" db:"tags" json:"tags,omitempty"`
-	Logs          []*Log     `thrift:"logs,11" db:"logs" json:"logs,omitempty"`
-}
-
-func NewSpan() *Span {
-	return &Span{}
-}
-
-func (p *Span) GetTraceIdLow() int64 {
-	return p.TraceIdLow
-}
-
-func (p *Span) GetTraceIdHigh() int64 {
-	return p.TraceIdHigh
-}
-
-func (p *Span) GetSpanId() int64 {
-	return p.SpanId
-}
-
-func (p *Span) GetParentSpanId() int64 {
-	return p.ParentSpanId
-}
-
-func (p *Span) GetOperationName() string {
-	return p.OperationName
-}
-
-var Span_References_DEFAULT []*SpanRef
-
-func (p *Span) GetReferences() []*SpanRef {
-	return p.References
-}
-
-func (p *Span) GetFlags() int32 {
-	return p.Flags
-}
-
-func (p *Span) GetStartTime() int64 {
-	return p.StartTime
-}
-
-func (p *Span) GetDuration() int64 {
-	return p.Duration
-}
-
-var Span_Tags_DEFAULT []*Tag
-
-func (p *Span) GetTags() []*Tag {
-	return p.Tags
-}
-
-var Span_Logs_DEFAULT []*Log
-
-func (p *Span) GetLogs() []*Log {
-	return p.Logs
-}
-func (p *Span) IsSetReferences() bool {
-	return p.References != nil
-}
-
-func (p *Span) IsSetTags() bool {
-	return p.Tags != nil
-}
-
-func (p *Span) IsSetLogs() bool {
-	return p.Logs != nil
-}
-
-func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetTraceIdLow bool = false
-	var issetTraceIdHigh bool = false
-	var issetSpanId bool = false
-	var issetParentSpanId bool = false
-	var issetOperationName bool = false
-	var issetFlags bool = false
-	var issetStartTime bool = false
-	var issetDuration bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-				issetTraceIdLow = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 2:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField2(ctx, iprot); err != nil {
-					return err
-				}
-				issetTraceIdHigh = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 3:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField3(ctx, iprot); err != nil {
-					return err
-				}
-				issetSpanId = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 4:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField4(ctx, iprot); err != nil {
-					return err
-				}
-				issetParentSpanId = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 5:
-			if fieldTypeId == thrift.STRING {
-				if err := p.ReadField5(ctx, iprot); err != nil {
-					return err
-				}
-				issetOperationName = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 6:
-			if fieldTypeId == thrift.LIST {
-				if err := p.ReadField6(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 7:
-			if fieldTypeId == thrift.I32 {
-				if err := p.ReadField7(ctx, iprot); err != nil {
-					return err
-				}
-				issetFlags = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 8:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField8(ctx, iprot); err != nil {
-					return err
-				}
-				issetStartTime = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 9:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField9(ctx, iprot); err != nil {
-					return err
-				}
-				issetDuration = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 10:
-			if fieldTypeId == thrift.LIST {
-				if err := p.ReadField10(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 11:
-			if fieldTypeId == thrift.LIST {
-				if err := p.ReadField11(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetTraceIdLow {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
-	}
-	if !issetTraceIdHigh {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
-	}
-	if !issetSpanId {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
-	}
-	if !issetParentSpanId {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set"))
-	}
-	if !issetOperationName {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set"))
-	}
-	if !issetFlags {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set"))
-	}
-	if !issetStartTime {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set"))
-	}
-	if !issetDuration {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set"))
-	}
-	return nil
-}
-
-func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.TraceIdLow = v
-	}
-	return nil
-}
-
-func (p *Span) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 2: ", err)
-	} else {
-		p.TraceIdHigh = v
-	}
-	return nil
-}
-
-func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 3: ", err)
-	} else {
-		p.SpanId = v
-	}
-	return nil
-}
-
-func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 4: ", err)
-	} else {
-		p.ParentSpanId = v
-	}
-	return nil
-}
-
-func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(ctx); err != nil {
-		return thrift.PrependError("error reading field 5: ", err)
-	} else {
-		p.OperationName = v
-	}
-	return nil
-}
-
-func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin(ctx)
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*SpanRef, 0, size)
-	p.References = tSlice
-	for i := 0; i < size; i++ {
-		_elem2 := &SpanRef{}
-		if err := _elem2.Read(ctx, iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
-		}
-		p.References = append(p.References, _elem2)
-	}
-	if err := iprot.ReadListEnd(ctx); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *Span) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI32(ctx); err != nil {
-		return thrift.PrependError("error reading field 7: ", err)
-	} else {
-		p.Flags = v
-	}
-	return nil
-}
-
-func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 8: ", err)
-	} else {
-		p.StartTime = v
-	}
-	return nil
-}
-
-func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 9: ", err)
-	} else {
-		p.Duration = v
-	}
-	return nil
-}
-
-func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin(ctx)
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Tag, 0, size)
-	p.Tags = tSlice
-	for i := 0; i < size; i++ {
-		_elem3 := &Tag{}
-		if err := _elem3.Read(ctx, iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err)
-		}
-		p.Tags = append(p.Tags, _elem3)
-	}
-	if err := iprot.ReadListEnd(ctx); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin(ctx)
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Log, 0, size)
-	p.Logs = tSlice
-	for i := 0; i < size; i++ {
-		_elem4 := &Log{}
-		if err := _elem4.Read(ctx, iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
-		}
-		p.Logs = append(p.Logs, _elem4)
-	}
-	if err := iprot.ReadListEnd(ctx); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "Span"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField2(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField3(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField4(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField5(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField6(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField7(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField8(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField9(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField10(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField11(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err)
-	}
-	if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err)
-	}
-	if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 3); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err)
-	}
-	if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "parentSpanId", thrift.I64, 4); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err)
-	}
-	if err := oprot.WriteI64(ctx, int64(p.ParentSpanId)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "operationName", thrift.STRING, 5); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err)
-	}
-	if err := oprot.WriteString(ctx, string(p.OperationName)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetReferences() {
-		if err := oprot.WriteFieldBegin(ctx, "references", thrift.LIST, 6); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err)
-		}
-		if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.References)); err != nil {
-			return thrift.PrependError("error writing list begin: ", err)
-		}
-		for _, v := range p.References {
-			if err := v.Write(ctx, oprot); err != nil {
-				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-			}
-		}
-		if err := oprot.WriteListEnd(ctx); err != nil {
-			return thrift.PrependError("error writing list end: ", err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Span) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "flags", thrift.I32, 7); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err)
-	}
-	if err := oprot.WriteI32(ctx, int32(p.Flags)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "startTime", thrift.I64, 8); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err)
-	}
-	if err := oprot.WriteI64(ctx, int64(p.StartTime)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 9); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err)
-	}
-	if err := oprot.WriteI64(ctx, int64(p.Duration)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetTags() {
-		if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 10); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err)
-		}
-		if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil {
-			return thrift.PrependError("error writing list begin: ", err)
-		}
-		for _, v := range p.Tags {
-			if err := v.Write(ctx, oprot); err != nil {
-				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-			}
-		}
-		if err := oprot.WriteListEnd(ctx); err != nil {
-			return thrift.PrependError("error writing list end: ", err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetLogs() {
-		if err := oprot.WriteFieldBegin(ctx, "logs", thrift.LIST, 11); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err)
-		}
-		if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Logs)); err != nil {
-			return thrift.PrependError("error writing list begin: ", err)
-		}
-		for _, v := range p.Logs {
-			if err := v.Write(ctx, oprot); err != nil {
-				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-			}
-		}
-		if err := oprot.WriteListEnd(ctx); err != nil {
-			return thrift.PrependError("error writing list end: ", err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Span) Equals(other *Span) bool {
-	if p == other {
-		return true
-	} else if p == nil || other == nil {
-		return false
-	}
-	if p.TraceIdLow != other.TraceIdLow {
-		return false
-	}
-	if p.TraceIdHigh != other.TraceIdHigh {
-		return false
-	}
-	if p.SpanId != other.SpanId {
-		return false
-	}
-	if p.ParentSpanId != other.ParentSpanId {
-		return false
-	}
-	if p.OperationName != other.OperationName {
-		return false
-	}
-	if len(p.References) != len(other.References) {
-		return false
-	}
-	for i, _tgt := range p.References {
-		_src5 := other.References[i]
-		if !_tgt.Equals(_src5) {
-			return false
-		}
-	}
-	if p.Flags != other.Flags {
-		return false
-	}
-	if p.StartTime != other.StartTime {
-		return false
-	}
-	if p.Duration != other.Duration {
-		return false
-	}
-	if len(p.Tags) != len(other.Tags) {
-		return false
-	}
-	for i, _tgt := range p.Tags {
-		_src6 := other.Tags[i]
-		if !_tgt.Equals(_src6) {
-			return false
-		}
-	}
-	if len(p.Logs) != len(other.Logs) {
-		return false
-	}
-	for i, _tgt := range p.Logs {
-		_src7 := other.Logs[i]
-		if !_tgt.Equals(_src7) {
-			return false
-		}
-	}
-	return true
-}
-
-func (p *Span) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Span(%+v)", *p)
-}
-
-// Attributes:
-//  - ServiceName
-//  - Tags
-type Process struct {
-	ServiceName string `thrift:"serviceName,1,required" db:"serviceName" json:"serviceName"`
-	Tags        []*Tag `thrift:"tags,2" db:"tags" json:"tags,omitempty"`
-}
-
-func NewProcess() *Process {
-	return &Process{}
-}
-
-func (p *Process) GetServiceName() string {
-	return p.ServiceName
-}
-
-var Process_Tags_DEFAULT []*Tag
-
-func (p *Process) GetTags() []*Tag {
-	return p.Tags
-}
-func (p *Process) IsSetTags() bool {
-	return p.Tags != nil
-}
-
-func (p *Process) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetServiceName bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.STRING {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-				issetServiceName = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 2:
-			if fieldTypeId == thrift.LIST {
-				if err := p.ReadField2(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetServiceName {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set"))
-	}
-	return nil
-}
-
-func (p *Process) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(ctx); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.ServiceName = v
-	}
-	return nil
-}
-
-func (p *Process) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin(ctx)
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Tag, 0, size)
-	p.Tags = tSlice
-	for i := 0; i < size; i++ {
-		_elem8 := &Tag{}
-		if err := _elem8.Read(ctx, iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err)
-		}
-		p.Tags = append(p.Tags, _elem8)
-	}
-	if err := iprot.ReadListEnd(ctx); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *Process) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "Process"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField2(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Process) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err)
-	}
-	if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err)
-	}
-	return err
-}
-
-func (p *Process) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetTags() {
-		if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 2); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err)
-		}
-		if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil {
-			return thrift.PrependError("error writing list begin: ", err)
-		}
-		for _, v := range p.Tags {
-			if err := v.Write(ctx, oprot); err != nil {
-				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-			}
-		}
-		if err := oprot.WriteListEnd(ctx); err != nil {
-			return thrift.PrependError("error writing list end: ", err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Process) Equals(other *Process) bool {
-	if p == other {
-		return true
-	} else if p == nil || other == nil {
-		return false
-	}
-	if p.ServiceName != other.ServiceName {
-		return false
-	}
-	if len(p.Tags) != len(other.Tags) {
-		return false
-	}
-	for i, _tgt := range p.Tags {
-		_src9 := other.Tags[i]
-		if !_tgt.Equals(_src9) {
-			return false
-		}
-	}
-	return true
-}
-
-func (p *Process) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Process(%+v)", *p)
-}
-
-// Attributes:
-//  - FullQueueDroppedSpans
-//  - TooLargeDroppedSpans
-//  - FailedToEmitSpans
-type ClientStats struct {
-	FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" db:"fullQueueDroppedSpans" json:"fullQueueDroppedSpans"`
-	TooLargeDroppedSpans  int64 `thrift:"tooLargeDroppedSpans,2,required" db:"tooLargeDroppedSpans" json:"tooLargeDroppedSpans"`
-	FailedToEmitSpans     int64 `thrift:"failedToEmitSpans,3,required" db:"failedToEmitSpans" json:"failedToEmitSpans"`
-}
-
-func NewClientStats() *ClientStats {
-	return &ClientStats{}
-}
-
-func (p *ClientStats) GetFullQueueDroppedSpans() int64 {
-	return p.FullQueueDroppedSpans
-}
-
-func (p *ClientStats) GetTooLargeDroppedSpans() int64 {
-	return p.TooLargeDroppedSpans
-}
-
-func (p *ClientStats) GetFailedToEmitSpans() int64 {
-	return p.FailedToEmitSpans
-}
-func (p *ClientStats) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetFullQueueDroppedSpans bool = false
-	var issetTooLargeDroppedSpans bool = false
-	var issetFailedToEmitSpans bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-				issetFullQueueDroppedSpans = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 2:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField2(ctx, iprot); err != nil {
-					return err
-				}
-				issetTooLargeDroppedSpans = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 3:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField3(ctx, iprot); err != nil {
-					return err
-				}
-				issetFailedToEmitSpans = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetFullQueueDroppedSpans {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set"))
-	}
-	if !issetTooLargeDroppedSpans {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set"))
-	}
-	if !issetFailedToEmitSpans {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set"))
-	}
-	return nil
-}
-
-func (p *ClientStats) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.FullQueueDroppedSpans = v
-	}
-	return nil
-}
-
-func (p *ClientStats) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 2: ", err)
-	} else {
-		p.TooLargeDroppedSpans = v
-	}
-	return nil
-}
-
-func (p *ClientStats) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 3: ", err)
-	} else {
-		p.FailedToEmitSpans = v
-	}
-	return nil
-}
-
-func (p *ClientStats) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "ClientStats"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField2(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField3(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *ClientStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "fullQueueDroppedSpans", thrift.I64, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err)
-	}
-	if err := oprot.WriteI64(ctx, int64(p.FullQueueDroppedSpans)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err)
-	}
-	return err
-}
-
-func (p *ClientStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "tooLargeDroppedSpans", thrift.I64, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err)
-	}
-	if err := oprot.WriteI64(ctx, int64(p.TooLargeDroppedSpans)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err)
-	}
-	return err
-}
-
-func (p *ClientStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "failedToEmitSpans", thrift.I64, 3); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err)
-	}
-	if err := oprot.WriteI64(ctx, int64(p.FailedToEmitSpans)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err)
-	}
-	return err
-}
-
-func (p *ClientStats) Equals(other *ClientStats) bool {
-	if p == other {
-		return true
-	} else if p == nil || other == nil {
-		return false
-	}
-	if p.FullQueueDroppedSpans != other.FullQueueDroppedSpans {
-		return false
-	}
-	if p.TooLargeDroppedSpans != other.TooLargeDroppedSpans {
-		return false
-	}
-	if p.FailedToEmitSpans != other.FailedToEmitSpans {
-		return false
-	}
-	return true
-}
-
-func (p *ClientStats) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("ClientStats(%+v)", *p)
-}
-
-// Attributes:
-//  - Process
-//  - Spans
-//  - SeqNo
-//  - Stats
-type Batch struct {
-	Process *Process     `thrift:"process,1,required" db:"process" json:"process"`
-	Spans   []*Span      `thrift:"spans,2,required" db:"spans" json:"spans"`
-	SeqNo   *int64       `thrift:"seqNo,3" db:"seqNo" json:"seqNo,omitempty"`
-	Stats   *ClientStats `thrift:"stats,4" db:"stats" json:"stats,omitempty"`
-}
-
-func NewBatch() *Batch {
-	return &Batch{}
-}
-
-var Batch_Process_DEFAULT *Process
-
-func (p *Batch) GetProcess() *Process {
-	if !p.IsSetProcess() {
-		return Batch_Process_DEFAULT
-	}
-	return p.Process
-}
-
-func (p *Batch) GetSpans() []*Span {
-	return p.Spans
-}
-
-var Batch_SeqNo_DEFAULT int64
-
-func (p *Batch) GetSeqNo() int64 {
-	if !p.IsSetSeqNo() {
-		return Batch_SeqNo_DEFAULT
-	}
-	return *p.SeqNo
-}
-
-var Batch_Stats_DEFAULT *ClientStats
-
-func (p *Batch) GetStats() *ClientStats {
-	if !p.IsSetStats() {
-		return Batch_Stats_DEFAULT
-	}
-	return p.Stats
-}
-func (p *Batch) IsSetProcess() bool {
-	return p.Process != nil
-}
-
-func (p *Batch) IsSetSeqNo() bool {
-	return p.SeqNo != nil
-}
-
-func (p *Batch) IsSetStats() bool {
-	return p.Stats != nil
-}
-
-func (p *Batch) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetProcess bool = false
-	var issetSpans bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.STRUCT {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-				issetProcess = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 2:
-			if fieldTypeId == thrift.LIST {
-				if err := p.ReadField2(ctx, iprot); err != nil {
-					return err
-				}
-				issetSpans = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 3:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField3(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 4:
-			if fieldTypeId == thrift.STRUCT {
-				if err := p.ReadField4(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetProcess {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set"))
-	}
-	if !issetSpans {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set"))
-	}
-	return nil
-}
-
-func (p *Batch) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	p.Process = &Process{}
-	if err := p.Process.Read(ctx, iprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err)
-	}
-	return nil
-}
-
-func (p *Batch) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin(ctx)
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Span, 0, size)
-	p.Spans = tSlice
-	for i := 0; i < size; i++ {
-		_elem10 := &Span{}
-		if err := _elem10.Read(ctx, iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err)
-		}
-		p.Spans = append(p.Spans, _elem10)
-	}
-	if err := iprot.ReadListEnd(ctx); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *Batch) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 3: ", err)
-	} else {
-		p.SeqNo = &v
-	}
-	return nil
-}
-
-func (p *Batch) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
-	p.Stats = &ClientStats{}
-	if err := p.Stats.Read(ctx, iprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err)
-	}
-	return nil
-}
-
-func (p *Batch) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "Batch"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField2(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField3(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField4(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Batch) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "process", thrift.STRUCT, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err)
-	}
-	if err := p.Process.Write(ctx, oprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err)
-	}
-	return err
-}
-
-func (p *Batch) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err)
-	}
-	if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
-		return thrift.PrependError("error writing list begin: ", err)
-	}
-	for _, v := range p.Spans {
-		if err := v.Write(ctx, oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-		}
-	}
-	if err := oprot.WriteListEnd(ctx); err != nil {
-		return thrift.PrependError("error writing list end: ", err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err)
-	}
-	return err
-}
-
-func (p *Batch) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetSeqNo() {
-		if err := oprot.WriteFieldBegin(ctx, "seqNo", thrift.I64, 3); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err)
-		}
-		if err := oprot.WriteI64(ctx, int64(*p.SeqNo)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Batch) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetStats() {
-		if err := oprot.WriteFieldBegin(ctx, "stats", thrift.STRUCT, 4); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err)
-		}
-		if err := p.Stats.Write(ctx, oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Batch) Equals(other *Batch) bool {
-	if p == other {
-		return true
-	} else if p == nil || other == nil {
-		return false
-	}
-	if !p.Process.Equals(other.Process) {
-		return false
-	}
-	if len(p.Spans) != len(other.Spans) {
-		return false
-	}
-	for i, _tgt := range p.Spans {
-		_src11 := other.Spans[i]
-		if !_tgt.Equals(_src11) {
-			return false
-		}
-	}
-	if p.SeqNo != other.SeqNo {
-		if p.SeqNo == nil || other.SeqNo == nil {
-			return false
-		}
-		if (*p.SeqNo) != (*other.SeqNo) {
-			return false
-		}
-	}
-	if !p.Stats.Equals(other.Stats) {
-		return false
-	}
-	return true
-}
-
-func (p *Batch) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Batch(%+v)", *p)
-}
-
-// Attributes:
-//  - Ok
-type BatchSubmitResponse struct {
-	Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
-}
-
-func NewBatchSubmitResponse() *BatchSubmitResponse {
-	return &BatchSubmitResponse{}
-}
-
-func (p *BatchSubmitResponse) GetOk() bool {
-	return p.Ok
-}
-func (p *BatchSubmitResponse) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetOk bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.BOOL {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-				issetOk = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetOk {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
-	}
-	return nil
-}
-
-func (p *BatchSubmitResponse) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadBool(ctx); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.Ok = v
-	}
-	return nil
-}
-
-func (p *BatchSubmitResponse) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "BatchSubmitResponse"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *BatchSubmitResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err)
-	}
-	if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err)
-	}
-	return err
-}
-
-func (p *BatchSubmitResponse) Equals(other *BatchSubmitResponse) bool {
-	if p == other {
-		return true
-	} else if p == nil || other == nil {
-		return false
-	}
-	if p.Ok != other.Ok {
-		return false
-	}
-	return true
-}
-
-func (p *BatchSubmitResponse) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("BatchSubmitResponse(%+v)", *p)
-}
-
-type Collector interface {
-	// Parameters:
-	//  - Batches
-	SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error)
-}
-
-type CollectorClient struct {
-	c    thrift.TClient
-	meta thrift.ResponseMeta
-}
-
-func NewCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *CollectorClient {
-	return &CollectorClient{
-		c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
-	}
-}
-
-func NewCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *CollectorClient {
-	return &CollectorClient{
-		c: thrift.NewTStandardClient(iprot, oprot),
-	}
-}
-
-func NewCollectorClient(c thrift.TClient) *CollectorClient {
-	return &CollectorClient{
-		c: c,
-	}
-}
-
-func (p *CollectorClient) Client_() thrift.TClient {
-	return p.c
-}
-
-func (p *CollectorClient) LastResponseMeta_() thrift.ResponseMeta {
-	return p.meta
-}
-
-func (p *CollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
-	p.meta = meta
-}
-
-// Parameters:
-//  - Batches
-func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) {
-	var _args12 CollectorSubmitBatchesArgs
-	_args12.Batches = batches
-	var _result14 CollectorSubmitBatchesResult
-	var _meta13 thrift.ResponseMeta
-	_meta13, _err = p.Client_().Call(ctx, "submitBatches", &_args12, &_result14)
-	p.SetLastResponseMeta_(_meta13)
-	if _err != nil {
-		return
-	}
-	return _result14.GetSuccess(), nil
-}
-
-type CollectorProcessor struct {
-	processorMap map[string]thrift.TProcessorFunction
-	handler      Collector
-}
-
-func (p *CollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
-	p.processorMap[key] = processor
-}
-
-func (p *CollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
-	processor, ok = p.processorMap[key]
-	return processor, ok
-}
-
-func (p *CollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
-	return p.processorMap
-}
-
-func NewCollectorProcessor(handler Collector) *CollectorProcessor {
-
-	self15 := &CollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
-	self15.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler: handler}
-	return self15
-}
-
-func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
-	if err2 != nil {
-		return false, thrift.WrapTException(err2)
-	}
-	if processor, ok := p.GetProcessorFunction(name); ok {
-		return processor.Process(ctx, seqId, iprot, oprot)
-	}
-	iprot.Skip(ctx, thrift.STRUCT)
-	iprot.ReadMessageEnd(ctx)
-	x16 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
-	oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
-	x16.Write(ctx, oprot)
-	oprot.WriteMessageEnd(ctx)
-	oprot.Flush(ctx)
-	return false, x16
-
-}
-
-type collectorProcessorSubmitBatches struct {
-	handler Collector
-}
-
-func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	args := CollectorSubmitBatchesArgs{}
-	var err2 error
-	if err2 = args.Read(ctx, iprot); err2 != nil {
-		iprot.ReadMessageEnd(ctx)
-		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
-		oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId)
-		x.Write(ctx, oprot)
-		oprot.WriteMessageEnd(ctx)
-		oprot.Flush(ctx)
-		return false, thrift.WrapTException(err2)
-	}
-	iprot.ReadMessageEnd(ctx)
-
-	tickerCancel := func() {}
-	// Start a goroutine to do server side connectivity check.
-	if thrift.ServerConnectivityCheckInterval > 0 {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithCancel(ctx)
-		defer cancel()
-		var tickerCtx context.Context
-		tickerCtx, tickerCancel = context.WithCancel(context.Background())
-		defer tickerCancel()
-		go func(ctx context.Context, cancel context.CancelFunc) {
-			ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
-			defer ticker.Stop()
-			for {
-				select {
-				case <-ctx.Done():
-					return
-				case <-ticker.C:
-					if !iprot.Transport().IsOpen() {
-						cancel()
-						return
-					}
-				}
-			}
-		}(tickerCtx, cancel)
-	}
-
-	result := CollectorSubmitBatchesResult{}
-	var retval []*BatchSubmitResponse
-	if retval, err2 = p.handler.SubmitBatches(ctx, args.Batches); err2 != nil {
-		tickerCancel()
-		if err2 == thrift.ErrAbandonRequest {
-			return false, thrift.WrapTException(err2)
-		}
-		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitBatches: "+err2.Error())
-		oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId)
-		x.Write(ctx, oprot)
-		oprot.WriteMessageEnd(ctx)
-		oprot.Flush(ctx)
-		return true, thrift.WrapTException(err2)
-	} else {
-		result.Success = retval
-	}
-	tickerCancel()
-	if err2 = oprot.WriteMessageBegin(ctx, "submitBatches", thrift.REPLY, seqId); err2 != nil {
-		err = thrift.WrapTException(err2)
-	}
-	if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
-		err = thrift.WrapTException(err2)
-	}
-	if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
-		err = thrift.WrapTException(err2)
-	}
-	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
-		err = thrift.WrapTException(err2)
-	}
-	if err != nil {
-		return
-	}
-	return true, err
-}
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-//  - Batches
-type CollectorSubmitBatchesArgs struct {
-	Batches []*Batch `thrift:"batches,1" db:"batches" json:"batches"`
-}
-
-func NewCollectorSubmitBatchesArgs() *CollectorSubmitBatchesArgs {
-	return &CollectorSubmitBatchesArgs{}
-}
-
-func (p *CollectorSubmitBatchesArgs) GetBatches() []*Batch {
-	return p.Batches
-}
-func (p *CollectorSubmitBatchesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.LIST {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *CollectorSubmitBatchesArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin(ctx)
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Batch, 0, size)
-	p.Batches = tSlice
-	for i := 0; i < size; i++ {
-		_elem17 := &Batch{}
-		if err := _elem17.Read(ctx, iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err)
-		}
-		p.Batches = append(p.Batches, _elem17)
-	}
-	if err := iprot.ReadListEnd(ctx); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *CollectorSubmitBatchesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "submitBatches_args"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *CollectorSubmitBatchesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "batches", thrift.LIST, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batches: ", p), err)
-	}
-	if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Batches)); err != nil {
-		return thrift.PrependError("error writing list begin: ", err)
-	}
-	for _, v := range p.Batches {
-		if err := v.Write(ctx, oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-		}
-	}
-	if err := oprot.WriteListEnd(ctx); err != nil {
-		return thrift.PrependError("error writing list end: ", err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batches: ", p), err)
-	}
-	return err
-}
-
-func (p *CollectorSubmitBatchesArgs) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("CollectorSubmitBatchesArgs(%+v)", *p)
-}
-
-// Attributes:
-//  - Success
-type CollectorSubmitBatchesResult struct {
-	Success []*BatchSubmitResponse `thrift:"success,0" db:"success" json:"success,omitempty"`
-}
-
-func NewCollectorSubmitBatchesResult() *CollectorSubmitBatchesResult {
-	return &CollectorSubmitBatchesResult{}
-}
-
-var CollectorSubmitBatchesResult_Success_DEFAULT []*BatchSubmitResponse
-
-func (p *CollectorSubmitBatchesResult) GetSuccess() []*BatchSubmitResponse {
-	return p.Success
-}
-func (p *CollectorSubmitBatchesResult) IsSetSuccess() bool {
-	return p.Success != nil
-}
-
-func (p *CollectorSubmitBatchesResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 0:
-			if fieldTypeId == thrift.LIST {
-				if err := p.ReadField0(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *CollectorSubmitBatchesResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin(ctx)
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*BatchSubmitResponse, 0, size)
-	p.Success = tSlice
-	for i := 0; i < size; i++ {
-		_elem18 := &BatchSubmitResponse{}
-		if err := _elem18.Read(ctx, iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err)
-		}
-		p.Success = append(p.Success, _elem18)
-	}
-	if err := iprot.ReadListEnd(ctx); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *CollectorSubmitBatchesResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "submitBatches_result"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField0(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *CollectorSubmitBatchesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetSuccess() {
-		if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
-		}
-		if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
-			return thrift.PrependError("error writing list begin: ", err)
-		}
-		for _, v := range p.Success {
-			if err := v.Write(ctx, oprot); err != nil {
-				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-			}
-		}
-		if err := oprot.WriteListEnd(ctx); err != nil {
-			return thrift.PrependError("error writing list end: ", err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *CollectorSubmitBatchesResult) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("CollectorSubmitBatchesResult(%+v)", *p)
-}
diff --git a/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go b/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go
deleted file mode 100644
index ebf43018fe7..00000000000
--- a/exporters/jaeger/internal/gen-go/zipkincore/GoUnusedProtection__.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package zipkincore
-
-var GoUnusedProtection__ int;
-
diff --git a/exporters/jaeger/internal/gen-go/zipkincore/zipkin_collector-remote/zipkin_collector-remote.go b/exporters/jaeger/internal/gen-go/zipkincore/zipkin_collector-remote/zipkin_collector-remote.go
deleted file mode 100755
index 127f67d05a1..00000000000
--- a/exporters/jaeger/internal/gen-go/zipkincore/zipkin_collector-remote/zipkin_collector-remote.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package main
-
-import (
-	"context"
-	"flag"
-	"fmt"
-	"math"
-	"net"
-	"net/url"
-	"os"
-	"strconv"
-	"strings"
-
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/zipkincore"
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-var _ = zipkincore.GoUnusedProtection__
-
-func Usage() {
-	fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:")
-	flag.PrintDefaults()
-	fmt.Fprintln(os.Stderr, "\nFunctions:")
-	fmt.Fprintln(os.Stderr, "   submitZipkinBatch( spans)")
-	fmt.Fprintln(os.Stderr)
-	os.Exit(0)
-}
-
-type httpHeaders map[string]string
-
-func (h httpHeaders) String() string {
-	var m map[string]string = h
-	return fmt.Sprintf("%s", m)
-}
-
-func (h httpHeaders) Set(value string) error {
-	parts := strings.Split(value, ": ")
-	if len(parts) != 2 {
-		return fmt.Errorf("header should be of format 'Key: Value'")
-	}
-	h[parts[0]] = parts[1]
-	return nil
-}
-
-func main() {
-	flag.Usage = Usage
-	var host string
-	var port int
-	var protocol string
-	var urlString string
-	var framed bool
-	var useHttp bool
-	headers := make(httpHeaders)
-	var parsedUrl *url.URL
-	var trans thrift.TTransport
-	_ = strconv.Atoi
-	_ = math.Abs
-	flag.Usage = Usage
-	flag.StringVar(&host, "h", "localhost", "Specify host and port")
-	flag.IntVar(&port, "p", 9090, "Specify port")
-	flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)")
-	flag.StringVar(&urlString, "u", "", "Specify the url")
-	flag.BoolVar(&framed, "framed", false, "Use framed transport")
-	flag.BoolVar(&useHttp, "http", false, "Use http")
-	flag.Var(headers, "H", "Headers to set on the http(s) request (e.g. -H \"Key: Value\")")
-	flag.Parse()
-
-	if len(urlString) > 0 {
-		var err error
-		parsedUrl, err = url.Parse(urlString)
-		if err != nil {
-			fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
-			flag.Usage()
-		}
-		host = parsedUrl.Host
-		useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" || parsedUrl.Scheme == "https"
-	} else if useHttp {
-		_, err := url.Parse(fmt.Sprint("http://", host, ":", port))
-		if err != nil {
-			fmt.Fprintln(os.Stderr, "Error parsing URL: ", err)
-			flag.Usage()
-		}
-	}
-
-	cmd := flag.Arg(0)
-	var err error
-	if useHttp {
-		trans, err = thrift.NewTHttpClient(parsedUrl.String())
-		if len(headers) > 0 {
-			httptrans := trans.(*thrift.THttpClient)
-			for key, value := range headers {
-				httptrans.SetHeader(key, value)
-			}
-		}
-	} else {
-		portStr := fmt.Sprint(port)
-		if strings.Contains(host, ":") {
-			host, portStr, err = net.SplitHostPort(host)
-			if err != nil {
-				fmt.Fprintln(os.Stderr, "error with host:", err)
-				os.Exit(1)
-			}
-		}
-		trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr))
-		if err != nil {
-			fmt.Fprintln(os.Stderr, "error resolving address:", err)
-			os.Exit(1)
-		}
-		if framed {
-			trans = thrift.NewTFramedTransport(trans)
-		}
-	}
-	if err != nil {
-		fmt.Fprintln(os.Stderr, "Error creating transport", err)
-		os.Exit(1)
-	}
-	defer trans.Close()
-	var protocolFactory thrift.TProtocolFactory
-	switch protocol {
-	case "compact":
-		protocolFactory = thrift.NewTCompactProtocolFactory()
-		break
-	case "simplejson":
-		protocolFactory = thrift.NewTSimpleJSONProtocolFactory()
-		break
-	case "json":
-		protocolFactory = thrift.NewTJSONProtocolFactory()
-		break
-	case "binary", "":
-		protocolFactory = thrift.NewTBinaryProtocolFactoryDefault()
-		break
-	default:
-		fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol)
-		Usage()
-		os.Exit(1)
-	}
-	iprot := protocolFactory.GetProtocol(trans)
-	oprot := protocolFactory.GetProtocol(trans)
-	client := zipkincore.NewZipkinCollectorClient(thrift.NewTStandardClient(iprot, oprot))
-	if err := trans.Open(); err != nil {
-		fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err)
-		os.Exit(1)
-	}
-
-	switch cmd {
-	case "submitZipkinBatch":
-		if flag.NArg()-1 != 1 {
-			fmt.Fprintln(os.Stderr, "SubmitZipkinBatch requires 1 args")
-			flag.Usage()
-		}
-		arg11 := flag.Arg(1)
-		mbTrans12 := thrift.NewTMemoryBufferLen(len(arg11))
-		defer mbTrans12.Close()
-		_, err13 := mbTrans12.WriteString(arg11)
-		if err13 != nil {
-			Usage()
-			return
-		}
-		factory14 := thrift.NewTJSONProtocolFactory()
-		jsProt15 := factory14.GetProtocol(mbTrans12)
-		containerStruct0 := zipkincore.NewZipkinCollectorSubmitZipkinBatchArgs()
-		err16 := containerStruct0.ReadField1(context.Background(), jsProt15)
-		if err16 != nil {
-			Usage()
-			return
-		}
-		argvalue0 := containerStruct0.Spans
-		value0 := argvalue0
-		fmt.Print(client.SubmitZipkinBatch(context.Background(), value0))
-		fmt.Print("\n")
-		break
-	case "":
-		Usage()
-		break
-	default:
-		fmt.Fprintln(os.Stderr, "Invalid function ", cmd)
-	}
-}
diff --git a/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go b/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go
deleted file mode 100644
index 043ecba9626..00000000000
--- a/exporters/jaeger/internal/gen-go/zipkincore/zipkincore-consts.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package zipkincore
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"time"
-
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-const CLIENT_SEND = "cs"
-const CLIENT_RECV = "cr"
-const SERVER_SEND = "ss"
-const SERVER_RECV = "sr"
-const MESSAGE_SEND = "ms"
-const MESSAGE_RECV = "mr"
-const WIRE_SEND = "ws"
-const WIRE_RECV = "wr"
-const CLIENT_SEND_FRAGMENT = "csf"
-const CLIENT_RECV_FRAGMENT = "crf"
-const SERVER_SEND_FRAGMENT = "ssf"
-const SERVER_RECV_FRAGMENT = "srf"
-const LOCAL_COMPONENT = "lc"
-const CLIENT_ADDR = "ca"
-const SERVER_ADDR = "sa"
-const MESSAGE_ADDR = "ma"
-
-func init() {
-}
diff --git a/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go b/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go
deleted file mode 100644
index 7f46810e0d3..00000000000
--- a/exporters/jaeger/internal/gen-go/zipkincore/zipkincore.go
+++ /dev/null
@@ -1,2067 +0,0 @@
-// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
-
-package zipkincore
-
-import (
-	"bytes"
-	"context"
-	"database/sql/driver"
-	"errors"
-	"fmt"
-	"time"
-
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = context.Background
-var _ = time.Now
-var _ = bytes.Equal
-
-type AnnotationType int64
-
-const (
-	AnnotationType_BOOL   AnnotationType = 0
-	AnnotationType_BYTES  AnnotationType = 1
-	AnnotationType_I16    AnnotationType = 2
-	AnnotationType_I32    AnnotationType = 3
-	AnnotationType_I64    AnnotationType = 4
-	AnnotationType_DOUBLE AnnotationType = 5
-	AnnotationType_STRING AnnotationType = 6
-)
-
-func (p AnnotationType) String() string {
-	switch p {
-	case AnnotationType_BOOL:
-		return "BOOL"
-	case AnnotationType_BYTES:
-		return "BYTES"
-	case AnnotationType_I16:
-		return "I16"
-	case AnnotationType_I32:
-		return "I32"
-	case AnnotationType_I64:
-		return "I64"
-	case AnnotationType_DOUBLE:
-		return "DOUBLE"
-	case AnnotationType_STRING:
-		return "STRING"
-	}
-	return "<UNSET>"
-}
-
-func AnnotationTypeFromString(s string) (AnnotationType, error) {
-	switch s {
-	case "BOOL":
-		return AnnotationType_BOOL, nil
-	case "BYTES":
-		return AnnotationType_BYTES, nil
-	case "I16":
-		return AnnotationType_I16, nil
-	case "I32":
-		return AnnotationType_I32, nil
-	case "I64":
-		return AnnotationType_I64, nil
-	case "DOUBLE":
-		return AnnotationType_DOUBLE, nil
-	case "STRING":
-		return AnnotationType_STRING, nil
-	}
-	return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
-}
-
-func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
-
-func (p AnnotationType) MarshalText() ([]byte, error) {
-	return []byte(p.String()), nil
-}
-
-func (p *AnnotationType) UnmarshalText(text []byte) error {
-	q, err := AnnotationTypeFromString(string(text))
-	if err != nil {
-		return err
-	}
-	*p = q
-	return nil
-}
-
-func (p *AnnotationType) Scan(value interface{}) error {
-	v, ok := value.(int64)
-	if !ok {
-		return errors.New("Scan value is not int64")
-	}
-	*p = AnnotationType(v)
-	return nil
-}
-
-func (p *AnnotationType) Value() (driver.Value, error) {
-	if p == nil {
-		return nil, nil
-	}
-	return int64(*p), nil
-}
-
-// Indicates the network context of a service recording an annotation with two
-// exceptions.
-//
-// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR,
-// the endpoint indicates the source or destination of an RPC. This exception
-// allows zipkin to display network context of uninstrumented services, or
-// clients such as web browsers.
-//
-// Attributes:
-//  - Ipv4: IPv4 host address packed into 4 bytes.
-//
-// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4
-//  - Port: IPv4 port
-//
-// Note: this is to be treated as an unsigned integer, so watch for negatives.
-//
-// Conventionally, when the port isn't known, port = 0.
-//  - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web"
-//
-// Conventionally, when the service name isn't known, service_name = "unknown".
-//  - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes()
-type Endpoint struct {
-	Ipv4        int32  `thrift:"ipv4,1" db:"ipv4" json:"ipv4"`
-	Port        int16  `thrift:"port,2" db:"port" json:"port"`
-	ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"`
-	Ipv6        []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"`
-}
-
-func NewEndpoint() *Endpoint {
-	return &Endpoint{}
-}
-
-func (p *Endpoint) GetIpv4() int32 {
-	return p.Ipv4
-}
-
-func (p *Endpoint) GetPort() int16 {
-	return p.Port
-}
-
-func (p *Endpoint) GetServiceName() string {
-	return p.ServiceName
-}
-
-var Endpoint_Ipv6_DEFAULT []byte
-
-func (p *Endpoint) GetIpv6() []byte {
-	return p.Ipv6
-}
-func (p *Endpoint) IsSetIpv6() bool {
-	return p.Ipv6 != nil
-}
-
-func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.I32 {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 2:
-			if fieldTypeId == thrift.I16 {
-				if err := p.ReadField2(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 3:
-			if fieldTypeId == thrift.STRING {
-				if err := p.ReadField3(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 4:
-			if fieldTypeId == thrift.STRING {
-				if err := p.ReadField4(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI32(ctx); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.Ipv4 = v
-	}
-	return nil
-}
-
-func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI16(ctx); err != nil {
-		return thrift.PrependError("error reading field 2: ", err)
-	} else {
-		p.Port = v
-	}
-	return nil
-}
-
-func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(ctx); err != nil {
-		return thrift.PrependError("error reading field 3: ", err)
-	} else {
-		p.ServiceName = v
-	}
-	return nil
-}
-
-func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadBinary(ctx); err != nil {
-		return thrift.PrependError("error reading field 4: ", err)
-	} else {
-		p.Ipv6 = v
-	}
-	return nil
-}
-
-func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField2(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField3(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField4(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err)
-	}
-	if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err)
-	}
-	return err
-}
-
-func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err)
-	}
-	if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err)
-	}
-	return err
-}
-
-func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err)
-	}
-	if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err)
-	}
-	return err
-}
-
-func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetIpv6() {
-		if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err)
-		}
-		if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Endpoint) Equals(other *Endpoint) bool {
-	if p == other {
-		return true
-	} else if p == nil || other == nil {
-		return false
-	}
-	if p.Ipv4 != other.Ipv4 {
-		return false
-	}
-	if p.Port != other.Port {
-		return false
-	}
-	if p.ServiceName != other.ServiceName {
-		return false
-	}
-	if bytes.Compare(p.Ipv6, other.Ipv6) != 0 {
-		return false
-	}
-	return true
-}
-
-func (p *Endpoint) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Endpoint(%+v)", *p)
-}
-
-// An annotation is similar to a log statement. It includes a host field which
-// allows these events to be attributed properly, and also aggregatable.
-//
-// Attributes:
-//  - Timestamp: Microseconds from epoch.
-//
-// This value should use the most precise value possible. For example,
-// gettimeofday or syncing nanoTime against a tick of currentTimeMillis.
-//  - Value
-//  - Host: Always the host that recorded the event. By specifying the host you allow
-// rollup of all events (such as client requests to a service) by IP address.
-type Annotation struct {
-	Timestamp int64     `thrift:"timestamp,1" db:"timestamp" json:"timestamp"`
-	Value     string    `thrift:"value,2" db:"value" json:"value"`
-	Host      *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"`
-}
-
-func NewAnnotation() *Annotation {
-	return &Annotation{}
-}
-
-func (p *Annotation) GetTimestamp() int64 {
-	return p.Timestamp
-}
-
-func (p *Annotation) GetValue() string {
-	return p.Value
-}
-
-var Annotation_Host_DEFAULT *Endpoint
-
-func (p *Annotation) GetHost() *Endpoint {
-	if !p.IsSetHost() {
-		return Annotation_Host_DEFAULT
-	}
-	return p.Host
-}
-func (p *Annotation) IsSetHost() bool {
-	return p.Host != nil
-}
-
-func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 2:
-			if fieldTypeId == thrift.STRING {
-				if err := p.ReadField2(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 3:
-			if fieldTypeId == thrift.STRUCT {
-				if err := p.ReadField3(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.Timestamp = v
-	}
-	return nil
-}
-
-func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(ctx); err != nil {
-		return thrift.PrependError("error reading field 2: ", err)
-	} else {
-		p.Value = v
-	}
-	return nil
-}
-
-func (p *Annotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
-	p.Host = &Endpoint{}
-	if err := p.Host.Read(ctx, iprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
-	}
-	return nil
-}
-
-func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField2(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField3(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
-	}
-	if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
-	}
-	return err
-}
-
-func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
-	}
-	if err := oprot.WriteString(ctx, string(p.Value)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
-	}
-	return err
-}
-
-func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetHost() {
-		if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err)
-		}
-		if err := p.Host.Write(ctx, oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Annotation) Equals(other *Annotation) bool {
-	if p == other {
-		return true
-	} else if p == nil || other == nil {
-		return false
-	}
-	if p.Timestamp != other.Timestamp {
-		return false
-	}
-	if p.Value != other.Value {
-		return false
-	}
-	if !p.Host.Equals(other.Host) {
-		return false
-	}
-	return true
-}
-
-func (p *Annotation) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Annotation(%+v)", *p)
-}
-
-// Binary annotations are tags applied to a Span to give it context. For
-// example, a binary annotation of "http.uri" could the path to a resource in a
-// RPC call.
-//
-// Binary annotations of type STRING are always queryable, though more a
-// historical implementation detail than a structural concern.
-//
-// Binary annotations can repeat, and vary on the host. Similar to Annotation,
-// the host indicates who logged the event. This allows you to tell the
-// difference between the client and server side of the same key. For example,
-// the key "http.uri" might be different on the client and server side due to
-// rewriting, like "/api/v1/myresource" vs "/myresource. Via the host field,
-// you can see the different points of view, which often help in debugging.
-//
-// Attributes:
-//  - Key
-//  - Value
-//  - AnnotationType
-//  - Host: The host that recorded tag, which allows you to differentiate between
-// multiple tags with the same key. There are two exceptions to this.
-//
-// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or
-// destination of an RPC. This exception allows zipkin to display network
-// context of uninstrumented services, or clients such as web browsers.
-type BinaryAnnotation struct {
-	Key            string         `thrift:"key,1" db:"key" json:"key"`
-	Value          []byte         `thrift:"value,2" db:"value" json:"value"`
-	AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"`
-	Host           *Endpoint      `thrift:"host,4" db:"host" json:"host,omitempty"`
-}
-
-func NewBinaryAnnotation() *BinaryAnnotation {
-	return &BinaryAnnotation{}
-}
-
-func (p *BinaryAnnotation) GetKey() string {
-	return p.Key
-}
-
-func (p *BinaryAnnotation) GetValue() []byte {
-	return p.Value
-}
-
-func (p *BinaryAnnotation) GetAnnotationType() AnnotationType {
-	return p.AnnotationType
-}
-
-var BinaryAnnotation_Host_DEFAULT *Endpoint
-
-func (p *BinaryAnnotation) GetHost() *Endpoint {
-	if !p.IsSetHost() {
-		return BinaryAnnotation_Host_DEFAULT
-	}
-	return p.Host
-}
-func (p *BinaryAnnotation) IsSetHost() bool {
-	return p.Host != nil
-}
-
-func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.STRING {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 2:
-			if fieldTypeId == thrift.STRING {
-				if err := p.ReadField2(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 3:
-			if fieldTypeId == thrift.I32 {
-				if err := p.ReadField3(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 4:
-			if fieldTypeId == thrift.STRUCT {
-				if err := p.ReadField4(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *BinaryAnnotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(ctx); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.Key = v
-	}
-	return nil
-}
-
-func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadBinary(ctx); err != nil {
-		return thrift.PrependError("error reading field 2: ", err)
-	} else {
-		p.Value = v
-	}
-	return nil
-}
-
-func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI32(ctx); err != nil {
-		return thrift.PrependError("error reading field 3: ", err)
-	} else {
-		temp := AnnotationType(v)
-		p.AnnotationType = temp
-	}
-	return nil
-}
-
-func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
-	p.Host = &Endpoint{}
-	if err := p.Host.Read(ctx, iprot); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
-	}
-	return nil
-}
-
-func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField2(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField3(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField4(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
-	}
-	if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
-	}
-	return err
-}
-
-func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
-	}
-	if err := oprot.WriteBinary(ctx, p.Value); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
-	}
-	return err
-}
-
-func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err)
-	}
-	if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err)
-	}
-	return err
-}
-
-func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetHost() {
-		if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err)
-		}
-		if err := p.Host.Write(ctx, oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool {
-	if p == other {
-		return true
-	} else if p == nil || other == nil {
-		return false
-	}
-	if p.Key != other.Key {
-		return false
-	}
-	if bytes.Compare(p.Value, other.Value) != 0 {
-		return false
-	}
-	if p.AnnotationType != other.AnnotationType {
-		return false
-	}
-	if !p.Host.Equals(other.Host) {
-		return false
-	}
-	return true
-}
-
-func (p *BinaryAnnotation) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("BinaryAnnotation(%+v)", *p)
-}
-
-// A trace is a series of spans (often RPC calls) which form a latency tree.
-//
-// The root span is where trace_id = id and parent_id = Nil. The root span is
-// usually the longest interval in the trace, starting with a SERVER_RECV
-// annotation and ending with a SERVER_SEND.
-//
-// Attributes:
-//  - TraceID
-//  - Name: Span name in lowercase, rpc method for example
-//
-// Conventionally, when the span name isn't known, name = "unknown".
-//  - ID
-//  - ParentID
-//  - Annotations
-//  - BinaryAnnotations
-//  - Debug
-//  - Timestamp: Microseconds from epoch of the creation of this span.
-//
-// This value should be set directly by instrumentation, using the most
-// precise value possible. For example, gettimeofday or syncing nanoTime
-// against a tick of currentTimeMillis.
-//
-// For compatibility with instrumentation that precede this field, collectors
-// or span stores can derive this via Annotation.timestamp.
-// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
-//
-// This field is optional for compatibility with old data: first-party span
-// stores are expected to support this at time of introduction.
-//  - Duration: Measurement of duration in microseconds, used to support queries.
-//
-// This value should be set directly, where possible. Doing so encourages
-// precise measurement decoupled from problems of clocks, such as skew or NTP
-// updates causing time to move backwards.
-//
-// For compatibility with instrumentation that precede this field, collectors
-// or span stores can derive this by subtracting Annotation.timestamp.
-// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
-//
-// If this field is persisted as unset, zipkin will continue to work, except
-// duration query support will be implementation-specific. Similarly, setting
-// this field non-atomically is implementation-specific.
-//
-// This field is i64 vs i32 to support spans longer than 35 minutes.
-//  - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non zero, this
-// means the trace uses 128 bit traceIds instead of 64 bit.
-type Span struct {
-	TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"`
-	// unused field # 2
-	Name        string        `thrift:"name,3" db:"name" json:"name"`
-	ID          int64         `thrift:"id,4" db:"id" json:"id"`
-	ParentID    *int64        `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"`
-	Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"`
-	// unused field # 7
-	BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"`
-	Debug             bool                `thrift:"debug,9" db:"debug" json:"debug"`
-	Timestamp         *int64              `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"`
-	Duration          *int64              `thrift:"duration,11" db:"duration" json:"duration,omitempty"`
-	TraceIDHigh       *int64              `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"`
-}
-
-func NewSpan() *Span {
-	return &Span{}
-}
-
-func (p *Span) GetTraceID() int64 {
-	return p.TraceID
-}
-
-func (p *Span) GetName() string {
-	return p.Name
-}
-
-func (p *Span) GetID() int64 {
-	return p.ID
-}
-
-var Span_ParentID_DEFAULT int64
-
-func (p *Span) GetParentID() int64 {
-	if !p.IsSetParentID() {
-		return Span_ParentID_DEFAULT
-	}
-	return *p.ParentID
-}
-
-func (p *Span) GetAnnotations() []*Annotation {
-	return p.Annotations
-}
-
-func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation {
-	return p.BinaryAnnotations
-}
-
-var Span_Debug_DEFAULT bool = false
-
-func (p *Span) GetDebug() bool {
-	return p.Debug
-}
-
-var Span_Timestamp_DEFAULT int64
-
-func (p *Span) GetTimestamp() int64 {
-	if !p.IsSetTimestamp() {
-		return Span_Timestamp_DEFAULT
-	}
-	return *p.Timestamp
-}
-
-var Span_Duration_DEFAULT int64
-
-func (p *Span) GetDuration() int64 {
-	if !p.IsSetDuration() {
-		return Span_Duration_DEFAULT
-	}
-	return *p.Duration
-}
-
-var Span_TraceIDHigh_DEFAULT int64
-
-func (p *Span) GetTraceIDHigh() int64 {
-	if !p.IsSetTraceIDHigh() {
-		return Span_TraceIDHigh_DEFAULT
-	}
-	return *p.TraceIDHigh
-}
-func (p *Span) IsSetParentID() bool {
-	return p.ParentID != nil
-}
-
-func (p *Span) IsSetDebug() bool {
-	return p.Debug != Span_Debug_DEFAULT
-}
-
-func (p *Span) IsSetTimestamp() bool {
-	return p.Timestamp != nil
-}
-
-func (p *Span) IsSetDuration() bool {
-	return p.Duration != nil
-}
-
-func (p *Span) IsSetTraceIDHigh() bool {
-	return p.TraceIDHigh != nil
-}
-
-func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 3:
-			if fieldTypeId == thrift.STRING {
-				if err := p.ReadField3(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 4:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField4(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 5:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField5(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 6:
-			if fieldTypeId == thrift.LIST {
-				if err := p.ReadField6(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 8:
-			if fieldTypeId == thrift.LIST {
-				if err := p.ReadField8(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 9:
-			if fieldTypeId == thrift.BOOL {
-				if err := p.ReadField9(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 10:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField10(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 11:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField11(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		case 12:
-			if fieldTypeId == thrift.I64 {
-				if err := p.ReadField12(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.TraceID = v
-	}
-	return nil
-}
-
-func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadString(ctx); err != nil {
-		return thrift.PrependError("error reading field 3: ", err)
-	} else {
-		p.Name = v
-	}
-	return nil
-}
-
-func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 4: ", err)
-	} else {
-		p.ID = v
-	}
-	return nil
-}
-
-func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 5: ", err)
-	} else {
-		p.ParentID = &v
-	}
-	return nil
-}
-
-func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin(ctx)
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Annotation, 0, size)
-	p.Annotations = tSlice
-	for i := 0; i < size; i++ {
-		_elem0 := &Annotation{}
-		if err := _elem0.Read(ctx, iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
-		}
-		p.Annotations = append(p.Annotations, _elem0)
-	}
-	if err := iprot.ReadListEnd(ctx); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin(ctx)
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*BinaryAnnotation, 0, size)
-	p.BinaryAnnotations = tSlice
-	for i := 0; i < size; i++ {
-		_elem1 := &BinaryAnnotation{}
-		if err := _elem1.Read(ctx, iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
-		}
-		p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1)
-	}
-	if err := iprot.ReadListEnd(ctx); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadBool(ctx); err != nil {
-		return thrift.PrependError("error reading field 9: ", err)
-	} else {
-		p.Debug = v
-	}
-	return nil
-}
-
-func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 10: ", err)
-	} else {
-		p.Timestamp = &v
-	}
-	return nil
-}
-
-func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 11: ", err)
-	} else {
-		p.Duration = &v
-	}
-	return nil
-}
-
-func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadI64(ctx); err != nil {
-		return thrift.PrependError("error reading field 12: ", err)
-	} else {
-		p.TraceIDHigh = &v
-	}
-	return nil
-}
-
-func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "Span"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField3(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField4(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField5(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField6(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField8(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField9(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField10(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField11(ctx, oprot); err != nil {
-			return err
-		}
-		if err := p.writeField12(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err)
-	}
-	if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err)
-	}
-	if err := oprot.WriteString(ctx, string(p.Name)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err)
-	}
-	if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetParentID() {
-		if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err)
-		}
-		if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err)
-	}
-	if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil {
-		return thrift.PrependError("error writing list begin: ", err)
-	}
-	for _, v := range p.Annotations {
-		if err := v.Write(ctx, oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-		}
-	}
-	if err := oprot.WriteListEnd(ctx); err != nil {
-		return thrift.PrependError("error writing list end: ", err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err)
-	}
-	if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil {
-		return thrift.PrependError("error writing list begin: ", err)
-	}
-	for _, v := range p.BinaryAnnotations {
-		if err := v.Write(ctx, oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-		}
-	}
-	if err := oprot.WriteListEnd(ctx); err != nil {
-		return thrift.PrependError("error writing list end: ", err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err)
-	}
-	return err
-}
-
-func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetDebug() {
-		if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err)
-		}
-		if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetTimestamp() {
-		if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err)
-		}
-		if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetDuration() {
-		if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err)
-		}
-		if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetTraceIDHigh() {
-		if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err)
-		}
-		if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *Span) Equals(other *Span) bool {
-	if p == other {
-		return true
-	} else if p == nil || other == nil {
-		return false
-	}
-	if p.TraceID != other.TraceID {
-		return false
-	}
-	if p.Name != other.Name {
-		return false
-	}
-	if p.ID != other.ID {
-		return false
-	}
-	if p.ParentID != other.ParentID {
-		if p.ParentID == nil || other.ParentID == nil {
-			return false
-		}
-		if (*p.ParentID) != (*other.ParentID) {
-			return false
-		}
-	}
-	if len(p.Annotations) != len(other.Annotations) {
-		return false
-	}
-	for i, _tgt := range p.Annotations {
-		_src2 := other.Annotations[i]
-		if !_tgt.Equals(_src2) {
-			return false
-		}
-	}
-	if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) {
-		return false
-	}
-	for i, _tgt := range p.BinaryAnnotations {
-		_src3 := other.BinaryAnnotations[i]
-		if !_tgt.Equals(_src3) {
-			return false
-		}
-	}
-	if p.Debug != other.Debug {
-		return false
-	}
-	if p.Timestamp != other.Timestamp {
-		if p.Timestamp == nil || other.Timestamp == nil {
-			return false
-		}
-		if (*p.Timestamp) != (*other.Timestamp) {
-			return false
-		}
-	}
-	if p.Duration != other.Duration {
-		if p.Duration == nil || other.Duration == nil {
-			return false
-		}
-		if (*p.Duration) != (*other.Duration) {
-			return false
-		}
-	}
-	if p.TraceIDHigh != other.TraceIDHigh {
-		if p.TraceIDHigh == nil || other.TraceIDHigh == nil {
-			return false
-		}
-		if (*p.TraceIDHigh) != (*other.TraceIDHigh) {
-			return false
-		}
-	}
-	return true
-}
-
-func (p *Span) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Span(%+v)", *p)
-}
-
-// Attributes:
-//  - Ok
-type Response struct {
-	Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
-}
-
-func NewResponse() *Response {
-	return &Response{}
-}
-
-func (p *Response) GetOk() bool {
-	return p.Ok
-}
-func (p *Response) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	var issetOk bool = false
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.BOOL {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-				issetOk = true
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	if !issetOk {
-		return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
-	}
-	return nil
-}
-
-func (p *Response) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	if v, err := iprot.ReadBool(ctx); err != nil {
-		return thrift.PrependError("error reading field 1: ", err)
-	} else {
-		p.Ok = v
-	}
-	return nil
-}
-
-func (p *Response) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "Response"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *Response) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err)
-	}
-	if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err)
-	}
-	return err
-}
-
-func (p *Response) Equals(other *Response) bool {
-	if p == other {
-		return true
-	} else if p == nil || other == nil {
-		return false
-	}
-	if p.Ok != other.Ok {
-		return false
-	}
-	return true
-}
-
-func (p *Response) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("Response(%+v)", *p)
-}
-
-type ZipkinCollector interface {
-	// Parameters:
-	//  - Spans
-	SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error)
-}
-
-type ZipkinCollectorClient struct {
-	c    thrift.TClient
-	meta thrift.ResponseMeta
-}
-
-func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient {
-	return &ZipkinCollectorClient{
-		c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
-	}
-}
-
-func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient {
-	return &ZipkinCollectorClient{
-		c: thrift.NewTStandardClient(iprot, oprot),
-	}
-}
-
-func NewZipkinCollectorClient(c thrift.TClient) *ZipkinCollectorClient {
-	return &ZipkinCollectorClient{
-		c: c,
-	}
-}
-
-func (p *ZipkinCollectorClient) Client_() thrift.TClient {
-	return p.c
-}
-
-func (p *ZipkinCollectorClient) LastResponseMeta_() thrift.ResponseMeta {
-	return p.meta
-}
-
-func (p *ZipkinCollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
-	p.meta = meta
-}
-
-// Parameters:
-//  - Spans
-func (p *ZipkinCollectorClient) SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) {
-	var _args4 ZipkinCollectorSubmitZipkinBatchArgs
-	_args4.Spans = spans
-	var _result6 ZipkinCollectorSubmitZipkinBatchResult
-	var _meta5 thrift.ResponseMeta
-	_meta5, _err = p.Client_().Call(ctx, "submitZipkinBatch", &_args4, &_result6)
-	p.SetLastResponseMeta_(_meta5)
-	if _err != nil {
-		return
-	}
-	return _result6.GetSuccess(), nil
-}
-
-type ZipkinCollectorProcessor struct {
-	processorMap map[string]thrift.TProcessorFunction
-	handler      ZipkinCollector
-}
-
-func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
-	p.processorMap[key] = processor
-}
-
-func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
-	processor, ok = p.processorMap[key]
-	return processor, ok
-}
-
-func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
-	return p.processorMap
-}
-
-func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor {
-
-	self7 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
-	self7.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler}
-	return self7
-}
-
-func (p *ZipkinCollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
-	if err2 != nil {
-		return false, thrift.WrapTException(err2)
-	}
-	if processor, ok := p.GetProcessorFunction(name); ok {
-		return processor.Process(ctx, seqId, iprot, oprot)
-	}
-	iprot.Skip(ctx, thrift.STRUCT)
-	iprot.ReadMessageEnd(ctx)
-	x8 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
-	oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
-	x8.Write(ctx, oprot)
-	oprot.WriteMessageEnd(ctx)
-	oprot.Flush(ctx)
-	return false, x8
-
-}
-
-type zipkinCollectorProcessorSubmitZipkinBatch struct {
-	handler ZipkinCollector
-}
-
-func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
-	args := ZipkinCollectorSubmitZipkinBatchArgs{}
-	var err2 error
-	if err2 = args.Read(ctx, iprot); err2 != nil {
-		iprot.ReadMessageEnd(ctx)
-		x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
-		oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId)
-		x.Write(ctx, oprot)
-		oprot.WriteMessageEnd(ctx)
-		oprot.Flush(ctx)
-		return false, thrift.WrapTException(err2)
-	}
-	iprot.ReadMessageEnd(ctx)
-
-	tickerCancel := func() {}
-	// Start a goroutine to do server side connectivity check.
-	if thrift.ServerConnectivityCheckInterval > 0 {
-		var cancel context.CancelFunc
-		ctx, cancel = context.WithCancel(ctx)
-		defer cancel()
-		var tickerCtx context.Context
-		tickerCtx, tickerCancel = context.WithCancel(context.Background())
-		defer tickerCancel()
-		go func(ctx context.Context, cancel context.CancelFunc) {
-			ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
-			defer ticker.Stop()
-			for {
-				select {
-				case <-ctx.Done():
-					return
-				case <-ticker.C:
-					if !iprot.Transport().IsOpen() {
-						cancel()
-						return
-					}
-				}
-			}
-		}(tickerCtx, cancel)
-	}
-
-	result := ZipkinCollectorSubmitZipkinBatchResult{}
-	var retval []*Response
-	if retval, err2 = p.handler.SubmitZipkinBatch(ctx, args.Spans); err2 != nil {
-		tickerCancel()
-		if err2 == thrift.ErrAbandonRequest {
-			return false, thrift.WrapTException(err2)
-		}
-		x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: "+err2.Error())
-		oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId)
-		x.Write(ctx, oprot)
-		oprot.WriteMessageEnd(ctx)
-		oprot.Flush(ctx)
-		return true, thrift.WrapTException(err2)
-	} else {
-		result.Success = retval
-	}
-	tickerCancel()
-	if err2 = oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.REPLY, seqId); err2 != nil {
-		err = thrift.WrapTException(err2)
-	}
-	if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
-		err = thrift.WrapTException(err2)
-	}
-	if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
-		err = thrift.WrapTException(err2)
-	}
-	if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
-		err = thrift.WrapTException(err2)
-	}
-	if err != nil {
-		return
-	}
-	return true, err
-}
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-//  - Spans
-type ZipkinCollectorSubmitZipkinBatchArgs struct {
-	Spans []*Span `thrift:"spans,1" db:"spans" json:"spans"`
-}
-
-func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs {
-	return &ZipkinCollectorSubmitZipkinBatchArgs{}
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span {
-	return p.Spans
-}
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 1:
-			if fieldTypeId == thrift.LIST {
-				if err := p.ReadField1(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin(ctx)
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Span, 0, size)
-	p.Spans = tSlice
-	for i := 0; i < size; i++ {
-		_elem9 := &Span{}
-		if err := _elem9.Read(ctx, iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err)
-		}
-		p.Spans = append(p.Spans, _elem9)
-	}
-	if err := iprot.ReadListEnd(ctx); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_args"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField1(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err)
-	}
-	if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
-		return thrift.PrependError("error writing list begin: ", err)
-	}
-	for _, v := range p.Spans {
-		if err := v.Write(ctx, oprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-		}
-	}
-	if err := oprot.WriteListEnd(ctx); err != nil {
-		return thrift.PrependError("error writing list end: ", err)
-	}
-	if err := oprot.WriteFieldEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err)
-	}
-	return err
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p)
-}
-
-// Attributes:
-//  - Success
-type ZipkinCollectorSubmitZipkinBatchResult struct {
-	Success []*Response `thrift:"success,0" db:"success" json:"success,omitempty"`
-}
-
-func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult {
-	return &ZipkinCollectorSubmitZipkinBatchResult{}
-}
-
-var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response {
-	return p.Success
-}
-func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool {
-	return p.Success != nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
-	if _, err := iprot.ReadStructBegin(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
-	}
-
-	for {
-		_, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
-		}
-		if fieldTypeId == thrift.STOP {
-			break
-		}
-		switch fieldId {
-		case 0:
-			if fieldTypeId == thrift.LIST {
-				if err := p.ReadField0(ctx, iprot); err != nil {
-					return err
-				}
-			} else {
-				if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-					return err
-				}
-			}
-		default:
-			if err := iprot.Skip(ctx, fieldTypeId); err != nil {
-				return err
-			}
-		}
-		if err := iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
-	}
-	return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
-	_, size, err := iprot.ReadListBegin(ctx)
-	if err != nil {
-		return thrift.PrependError("error reading list begin: ", err)
-	}
-	tSlice := make([]*Response, 0, size)
-	p.Success = tSlice
-	for i := 0; i < size; i++ {
-		_elem10 := &Response{}
-		if err := _elem10.Read(ctx, iprot); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err)
-		}
-		p.Success = append(p.Success, _elem10)
-	}
-	if err := iprot.ReadListEnd(ctx); err != nil {
-		return thrift.PrependError("error reading list end: ", err)
-	}
-	return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
-	if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_result"); err != nil {
-		return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
-	}
-	if p != nil {
-		if err := p.writeField0(ctx, oprot); err != nil {
-			return err
-		}
-	}
-	if err := oprot.WriteFieldStop(ctx); err != nil {
-		return thrift.PrependError("write field stop error: ", err)
-	}
-	if err := oprot.WriteStructEnd(ctx); err != nil {
-		return thrift.PrependError("write struct stop error: ", err)
-	}
-	return nil
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
-	if p.IsSetSuccess() {
-		if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
-		}
-		if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
-			return thrift.PrependError("error writing list begin: ", err)
-		}
-		for _, v := range p.Success {
-			if err := v.Write(ctx, oprot); err != nil {
-				return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
-			}
-		}
-		if err := oprot.WriteListEnd(ctx); err != nil {
-			return thrift.PrependError("error writing list end: ", err)
-		}
-		if err := oprot.WriteFieldEnd(ctx); err != nil {
-			return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
-		}
-	}
-	return err
-}
-
-func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string {
-	if p == nil {
-		return "<nil>"
-	}
-	return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p)
-}
diff --git a/exporters/jaeger/internal/gen.go b/exporters/jaeger/internal/gen.go
deleted file mode 100644
index ceb2b04e3d6..00000000000
--- a/exporters/jaeger/internal/gen.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opentelemetry.io/otel/exporters/jaeger/internal"
-
-//go:generate gotmpl --body=../../../internal/shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
-//go:generate gotmpl --body=../../../internal/shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
-//go:generate gotmpl --body=../../../internal/shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
-
-//go:generate gotmpl --body=../../../internal/shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
-//go:generate gotmpl --body=../../../internal/shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
-//go:generate gotmpl --body=../../../internal/shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
-//go:generate gotmpl --body=../../../internal/shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
-//go:generate gotmpl --body=../../../internal/shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/exporters/jaeger/internal/matchers\"}" --out=internaltest/harness.go
-//go:generate gotmpl --body=../../../internal/shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
-//go:generate gotmpl --body=../../../internal/shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
-//go:generate gotmpl --body=../../../internal/shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
-//go:generate gotmpl --body=../../../internal/shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go
diff --git a/exporters/jaeger/internal/internaltest/alignment.go b/exporters/jaeger/internal/internaltest/alignment.go
deleted file mode 100644
index 6885811cccc..00000000000
--- a/exporters/jaeger/internal/internaltest/alignment.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/internaltest/alignment.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internaltest // import "go.opentelemetry.io/otel/exporters/jaeger/internal/internaltest"
-
-/*
-This file contains common utilities and objects to validate memory alignment
-of Go types. The primary use of this functionality is intended to ensure
-`struct` fields that need to be 64-bit aligned so they can be passed as
-arguments to 64-bit atomic operations.
-
-The common workflow is to define a slice of `FieldOffset` and pass them to the
-`Aligned8Byte` function from within a `TestMain` function from a package's
-tests. It is important to make this call from the `TestMain` function prior
-to running the rest of the test suit as it can provide useful diagnostics
-about field alignment instead of ambiguous nil pointer dereference and runtime
-panic.
-
-For more information:
-https://github.com/open-telemetry/opentelemetry-go/issues/341
-*/
-
-import (
-	"fmt"
-	"io"
-)
-
-// FieldOffset is a preprocessor representation of a struct field alignment.
-type FieldOffset struct {
-	// Name of the field.
-	Name string
-
-	// Offset of the field in bytes.
-	//
-	// To compute this at compile time use unsafe.Offsetof.
-	Offset uintptr
-}
-
-// Aligned8Byte returns if all fields are aligned modulo 8-bytes.
-//
-// Error messaging is printed to out for any field determined misaligned.
-func Aligned8Byte(fields []FieldOffset, out io.Writer) bool {
-	misaligned := make([]FieldOffset, 0)
-	for _, f := range fields {
-		if f.Offset%8 != 0 {
-			misaligned = append(misaligned, f)
-		}
-	}
-
-	if len(misaligned) == 0 {
-		return true
-	}
-
-	fmt.Fprintln(out, "struct fields not aligned for 64-bit atomic operations:")
-	for _, f := range misaligned {
-		fmt.Fprintf(out, "  %s: %d-byte offset\n", f.Name, f.Offset)
-	}
-
-	return false
-}
diff --git a/exporters/jaeger/internal/internaltest/env.go b/exporters/jaeger/internal/internaltest/env.go
deleted file mode 100644
index 8f05ef4961d..00000000000
--- a/exporters/jaeger/internal/internaltest/env.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/internaltest/env.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internaltest // import "go.opentelemetry.io/otel/exporters/jaeger/internal/internaltest"
-
-import (
-	"os"
-)
-
-type Env struct {
-	Name   string
-	Value  string
-	Exists bool
-}
-
-// EnvStore stores and recovers environment variables.
-type EnvStore interface {
-	// Records the environment variable into the store.
-	Record(key string)
-
-	// Restore recovers the environment variables in the store.
-	Restore() error
-}
-
-var _ EnvStore = (*envStore)(nil)
-
-type envStore struct {
-	store map[string]Env
-}
-
-func (s *envStore) add(env Env) {
-	s.store[env.Name] = env
-}
-
-func (s *envStore) Restore() error {
-	var err error
-	for _, v := range s.store {
-		if v.Exists {
-			err = os.Setenv(v.Name, v.Value)
-		} else {
-			err = os.Unsetenv(v.Name)
-		}
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (s *envStore) setEnv(key, value string) error {
-	s.Record(key)
-
-	err := os.Setenv(key, value)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-func (s *envStore) Record(key string) {
-	originValue, exists := os.LookupEnv(key)
-	s.add(Env{
-		Name:   key,
-		Value:  originValue,
-		Exists: exists,
-	})
-}
-
-func NewEnvStore() EnvStore {
-	return newEnvStore()
-}
-
-func newEnvStore() *envStore {
-	return &envStore{store: make(map[string]Env)}
-}
-
-func SetEnvVariables(env map[string]string) (EnvStore, error) {
-	envStore := newEnvStore()
-
-	for k, v := range env {
-		err := envStore.setEnv(k, v)
-		if err != nil {
-			return nil, err
-		}
-	}
-	return envStore, nil
-}
diff --git a/exporters/jaeger/internal/internaltest/env_test.go b/exporters/jaeger/internal/internaltest/env_test.go
deleted file mode 100644
index dc4dcea8e30..00000000000
--- a/exporters/jaeger/internal/internaltest/env_test.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/internaltest/env_test.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internaltest
-
-import (
-	"os"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-	"github.com/stretchr/testify/suite"
-)
-
-type EnvStoreTestSuite struct {
-	suite.Suite
-}
-
-func (s *EnvStoreTestSuite) Test_add() {
-	envStore := newEnvStore()
-
-	e := Env{
-		Name:   "name",
-		Value:  "value",
-		Exists: true,
-	}
-	envStore.add(e)
-	envStore.add(e)
-
-	s.Assert().Len(envStore.store, 1)
-}
-
-func (s *EnvStoreTestSuite) TestRecord() {
-	testCases := []struct {
-		name             string
-		env              Env
-		expectedEnvStore *envStore
-	}{
-		{
-			name: "record exists env",
-			env: Env{
-				Name:   "name",
-				Value:  "value",
-				Exists: true,
-			},
-			expectedEnvStore: &envStore{store: map[string]Env{
-				"name": {
-					Name:   "name",
-					Value:  "value",
-					Exists: true,
-				},
-			}},
-		},
-		{
-			name: "record exists env, but its value is empty",
-			env: Env{
-				Name:   "name",
-				Value:  "",
-				Exists: true,
-			},
-			expectedEnvStore: &envStore{store: map[string]Env{
-				"name": {
-					Name:   "name",
-					Value:  "",
-					Exists: true,
-				},
-			}},
-		},
-		{
-			name: "record not exists env",
-			env: Env{
-				Name:   "name",
-				Exists: false,
-			},
-			expectedEnvStore: &envStore{store: map[string]Env{
-				"name": {
-					Name:   "name",
-					Exists: false,
-				},
-			}},
-		},
-	}
-
-	for _, tc := range testCases {
-		s.Run(tc.name, func() {
-			if tc.env.Exists {
-				s.Assert().NoError(os.Setenv(tc.env.Name, tc.env.Value))
-			}
-
-			envStore := newEnvStore()
-			envStore.Record(tc.env.Name)
-
-			s.Assert().Equal(tc.expectedEnvStore, envStore)
-
-			if tc.env.Exists {
-				s.Assert().NoError(os.Unsetenv(tc.env.Name))
-			}
-		})
-	}
-}
-
-func (s *EnvStoreTestSuite) TestRestore() {
-	testCases := []struct {
-		name              string
-		env               Env
-		expectedEnvValue  string
-		expectedEnvExists bool
-	}{
-		{
-			name: "exists env",
-			env: Env{
-				Name:   "name",
-				Value:  "value",
-				Exists: true,
-			},
-			expectedEnvValue:  "value",
-			expectedEnvExists: true,
-		},
-		{
-			name: "no exists env",
-			env: Env{
-				Name:   "name",
-				Exists: false,
-			},
-			expectedEnvExists: false,
-		},
-	}
-
-	for _, tc := range testCases {
-		s.Run(tc.name, func() {
-			envStore := newEnvStore()
-			envStore.add(tc.env)
-
-			// Backup
-			backup := newEnvStore()
-			backup.Record(tc.env.Name)
-
-			s.Require().NoError(os.Unsetenv(tc.env.Name))
-
-			s.Assert().NoError(envStore.Restore())
-			v, exists := os.LookupEnv(tc.env.Name)
-			s.Assert().Equal(tc.expectedEnvValue, v)
-			s.Assert().Equal(tc.expectedEnvExists, exists)
-
-			// Restore
-			s.Require().NoError(backup.Restore())
-		})
-	}
-}
-
-func (s *EnvStoreTestSuite) Test_setEnv() {
-	testCases := []struct {
-		name              string
-		key               string
-		value             string
-		expectedEnvStore  *envStore
-		expectedEnvValue  string
-		expectedEnvExists bool
-	}{
-		{
-			name:  "normal",
-			key:   "name",
-			value: "value",
-			expectedEnvStore: &envStore{store: map[string]Env{
-				"name": {
-					Name:   "name",
-					Value:  "other value",
-					Exists: true,
-				},
-			}},
-			expectedEnvValue:  "value",
-			expectedEnvExists: true,
-		},
-	}
-
-	for _, tc := range testCases {
-		s.Run(tc.name, func() {
-			envStore := newEnvStore()
-
-			// Backup
-			backup := newEnvStore()
-			backup.Record(tc.key)
-
-			s.Require().NoError(os.Setenv(tc.key, "other value"))
-
-			s.Assert().NoError(envStore.setEnv(tc.key, tc.value))
-			s.Assert().Equal(tc.expectedEnvStore, envStore)
-			v, exists := os.LookupEnv(tc.key)
-			s.Assert().Equal(tc.expectedEnvValue, v)
-			s.Assert().Equal(tc.expectedEnvExists, exists)
-
-			// Restore
-			s.Require().NoError(backup.Restore())
-		})
-	}
-}
-
-func TestEnvStoreTestSuite(t *testing.T) {
-	suite.Run(t, new(EnvStoreTestSuite))
-}
-
-func TestSetEnvVariables(t *testing.T) {
-	envs := map[string]string{
-		"name1": "value1",
-		"name2": "value2",
-	}
-
-	// Backup
-	backup := newEnvStore()
-	for k := range envs {
-		backup.Record(k)
-	}
-	defer func() {
-		require.NoError(t, backup.Restore())
-	}()
-
-	store, err := SetEnvVariables(envs)
-	assert.NoError(t, err)
-	require.IsType(t, &envStore{}, store)
-	concreteStore := store.(*envStore)
-	assert.Len(t, concreteStore.store, 2)
-	assert.Equal(t, backup, concreteStore)
-}
diff --git a/exporters/jaeger/internal/internaltest/errors.go b/exporters/jaeger/internal/internaltest/errors.go
deleted file mode 100644
index 71d48fdf6e8..00000000000
--- a/exporters/jaeger/internal/internaltest/errors.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/internaltest/errors.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internaltest // import "go.opentelemetry.io/otel/exporters/jaeger/internal/internaltest"
-
-type TestError string
-
-var _ error = TestError("")
-
-func NewTestError(s string) error {
-	return TestError(s)
-}
-
-func (e TestError) Error() string {
-	return string(e)
-}
diff --git a/exporters/jaeger/internal/internaltest/harness.go b/exporters/jaeger/internal/internaltest/harness.go
deleted file mode 100644
index a938e733965..00000000000
--- a/exporters/jaeger/internal/internaltest/harness.go
+++ /dev/null
@@ -1,344 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/internaltest/harness.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internaltest // import "go.opentelemetry.io/otel/exporters/jaeger/internal/internaltest"
-
-import (
-	"context"
-	"fmt"
-	"sync"
-	"testing"
-	"time"
-
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/codes"
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/matchers"
-	"go.opentelemetry.io/otel/trace"
-)
-
-// Harness is a testing harness used to test implementations of the
-// OpenTelemetry API.
-type Harness struct {
-	t *testing.T
-}
-
-// NewHarness returns an instantiated *Harness using t.
-func NewHarness(t *testing.T) *Harness {
-	return &Harness{
-		t: t,
-	}
-}
-
-// TestTracerProvider runs validation tests for an implementation of the OpenTelemetry
-// TracerProvider API.
-func (h *Harness) TestTracerProvider(subjectFactory func() trace.TracerProvider) {
-	h.t.Run("#Start", func(t *testing.T) {
-		t.Run("allow creating an arbitrary number of TracerProvider instances", func(t *testing.T) {
-			t.Parallel()
-
-			e := matchers.NewExpecter(t)
-
-			tp1 := subjectFactory()
-			tp2 := subjectFactory()
-
-			e.Expect(tp1).NotToEqual(tp2)
-		})
-		t.Run("all methods are safe to be called concurrently", func(t *testing.T) {
-			t.Parallel()
-
-			runner := func(tp trace.TracerProvider) <-chan struct{} {
-				done := make(chan struct{})
-				go func(tp trace.TracerProvider) {
-					var wg sync.WaitGroup
-					for i := 0; i < 20; i++ {
-						wg.Add(1)
-						go func(name, version string) {
-							_ = tp.Tracer(name, trace.WithInstrumentationVersion(version))
-							wg.Done()
-						}(fmt.Sprintf("tracer %d", i%5), fmt.Sprintf("%d", i))
-					}
-					wg.Wait()
-					done <- struct{}{}
-				}(tp)
-				return done
-			}
-
-			matchers.NewExpecter(t).Expect(func() {
-				// Run with multiple TracerProvider to ensure they encapsulate
-				// their own Tracers.
-				tp1 := subjectFactory()
-				tp2 := subjectFactory()
-
-				done1 := runner(tp1)
-				done2 := runner(tp2)
-
-				<-done1
-				<-done2
-			}).NotToPanic()
-		})
-	})
-}
-
-// TestTracer runs validation tests for an implementation of the OpenTelemetry
-// Tracer API.
-func (h *Harness) TestTracer(subjectFactory func() trace.Tracer) {
-	h.t.Run("#Start", func(t *testing.T) {
-		t.Run("propagates the original context", func(t *testing.T) {
-			t.Parallel()
-
-			e := matchers.NewExpecter(t)
-			subject := subjectFactory()
-
-			ctxKey := testCtxKey{}
-			ctxValue := "ctx value"
-			ctx := context.WithValue(context.Background(), ctxKey, ctxValue)
-
-			ctx, _ = subject.Start(ctx, "test")
-
-			e.Expect(ctx.Value(ctxKey)).ToEqual(ctxValue)
-		})
-
-		t.Run("returns a span containing the expected properties", func(t *testing.T) {
-			t.Parallel()
-
-			e := matchers.NewExpecter(t)
-			subject := subjectFactory()
-
-			_, span := subject.Start(context.Background(), "test")
-
-			e.Expect(span).NotToBeNil()
-
-			e.Expect(span.SpanContext().IsValid()).ToBeTrue()
-		})
-
-		t.Run("stores the span on the provided context", func(t *testing.T) {
-			t.Parallel()
-
-			e := matchers.NewExpecter(t)
-			subject := subjectFactory()
-
-			ctx, span := subject.Start(context.Background(), "test")
-
-			e.Expect(span).NotToBeNil()
-			e.Expect(span.SpanContext()).NotToEqual(trace.SpanContext{})
-			e.Expect(trace.SpanFromContext(ctx)).ToEqual(span)
-		})
-
-		t.Run("starts spans with unique trace and span IDs", func(t *testing.T) {
-			t.Parallel()
-
-			e := matchers.NewExpecter(t)
-			subject := subjectFactory()
-
-			_, span1 := subject.Start(context.Background(), "span1")
-			_, span2 := subject.Start(context.Background(), "span2")
-
-			sc1 := span1.SpanContext()
-			sc2 := span2.SpanContext()
-
-			e.Expect(sc1.TraceID()).NotToEqual(sc2.TraceID())
-			e.Expect(sc1.SpanID()).NotToEqual(sc2.SpanID())
-		})
-
-		t.Run("propagates a parent's trace ID through the context", func(t *testing.T) {
-			t.Parallel()
-
-			e := matchers.NewExpecter(t)
-			subject := subjectFactory()
-
-			ctx, parent := subject.Start(context.Background(), "parent")
-			_, child := subject.Start(ctx, "child")
-
-			psc := parent.SpanContext()
-			csc := child.SpanContext()
-
-			e.Expect(csc.TraceID()).ToEqual(psc.TraceID())
-			e.Expect(csc.SpanID()).NotToEqual(psc.SpanID())
-		})
-
-		t.Run("ignores parent's trace ID when new root is requested", func(t *testing.T) {
-			t.Parallel()
-
-			e := matchers.NewExpecter(t)
-			subject := subjectFactory()
-
-			ctx, parent := subject.Start(context.Background(), "parent")
-			_, child := subject.Start(ctx, "child", trace.WithNewRoot())
-
-			psc := parent.SpanContext()
-			csc := child.SpanContext()
-
-			e.Expect(csc.TraceID()).NotToEqual(psc.TraceID())
-			e.Expect(csc.SpanID()).NotToEqual(psc.SpanID())
-		})
-
-		t.Run("propagates remote parent's trace ID through the context", func(t *testing.T) {
-			t.Parallel()
-
-			e := matchers.NewExpecter(t)
-			subject := subjectFactory()
-
-			_, remoteParent := subject.Start(context.Background(), "remote parent")
-			parentCtx := trace.ContextWithRemoteSpanContext(context.Background(), remoteParent.SpanContext())
-			_, child := subject.Start(parentCtx, "child")
-
-			psc := remoteParent.SpanContext()
-			csc := child.SpanContext()
-
-			e.Expect(csc.TraceID()).ToEqual(psc.TraceID())
-			e.Expect(csc.SpanID()).NotToEqual(psc.SpanID())
-		})
-
-		t.Run("ignores remote parent's trace ID when new root is requested", func(t *testing.T) {
-			t.Parallel()
-
-			e := matchers.NewExpecter(t)
-			subject := subjectFactory()
-
-			_, remoteParent := subject.Start(context.Background(), "remote parent")
-			parentCtx := trace.ContextWithRemoteSpanContext(context.Background(), remoteParent.SpanContext())
-			_, child := subject.Start(parentCtx, "child", trace.WithNewRoot())
-
-			psc := remoteParent.SpanContext()
-			csc := child.SpanContext()
-
-			e.Expect(csc.TraceID()).NotToEqual(psc.TraceID())
-			e.Expect(csc.SpanID()).NotToEqual(psc.SpanID())
-		})
-
-		t.Run("all methods are safe to be called concurrently", func(t *testing.T) {
-			t.Parallel()
-
-			e := matchers.NewExpecter(t)
-			tracer := subjectFactory()
-
-			ctx, parent := tracer.Start(context.Background(), "span")
-
-			runner := func(tp trace.Tracer) <-chan struct{} {
-				done := make(chan struct{})
-				go func(tp trace.Tracer) {
-					var wg sync.WaitGroup
-					for i := 0; i < 20; i++ {
-						wg.Add(1)
-						go func(name string) {
-							defer wg.Done()
-							_, child := tp.Start(ctx, name)
-
-							psc := parent.SpanContext()
-							csc := child.SpanContext()
-
-							e.Expect(csc.TraceID()).ToEqual(psc.TraceID())
-							e.Expect(csc.SpanID()).NotToEqual(psc.SpanID())
-						}(fmt.Sprintf("span %d", i))
-					}
-					wg.Wait()
-					done <- struct{}{}
-				}(tp)
-				return done
-			}
-
-			e.Expect(func() {
-				done := runner(tracer)
-
-				<-done
-			}).NotToPanic()
-		})
-	})
-
-	h.testSpan(subjectFactory)
-}
-
-func (h *Harness) testSpan(tracerFactory func() trace.Tracer) {
-	var methods = map[string]func(span trace.Span){
-		"#End": func(span trace.Span) {
-			span.End()
-		},
-		"#AddEvent": func(span trace.Span) {
-			span.AddEvent("test event")
-		},
-		"#AddEventWithTimestamp": func(span trace.Span) {
-			span.AddEvent("test event", trace.WithTimestamp(time.Now().Add(1*time.Second)))
-		},
-		"#SetStatus": func(span trace.Span) {
-			span.SetStatus(codes.Error, "internal")
-		},
-		"#SetName": func(span trace.Span) {
-			span.SetName("new name")
-		},
-		"#SetAttributes": func(span trace.Span) {
-			span.SetAttributes(attribute.String("key1", "value"), attribute.Int("key2", 123))
-		},
-	}
-	var mechanisms = map[string]func() trace.Span{
-		"Span created via Tracer#Start": func() trace.Span {
-			tracer := tracerFactory()
-			_, subject := tracer.Start(context.Background(), "test")
-
-			return subject
-		},
-		"Span created via span.TracerProvider()": func() trace.Span {
-			ctx, spanA := tracerFactory().Start(context.Background(), "span1")
-
-			_, spanB := spanA.TracerProvider().Tracer("second").Start(ctx, "span2")
-			return spanB
-		},
-	}
-
-	for mechanismName, mechanism := range mechanisms {
-		h.t.Run(mechanismName, func(t *testing.T) {
-			for methodName, method := range methods {
-				t.Run(methodName, func(t *testing.T) {
-					t.Run("is thread-safe", func(t *testing.T) {
-						t.Parallel()
-
-						span := mechanism()
-
-						wg := &sync.WaitGroup{}
-						wg.Add(2)
-
-						go func() {
-							defer wg.Done()
-
-							method(span)
-						}()
-
-						go func() {
-							defer wg.Done()
-
-							method(span)
-						}()
-
-						wg.Wait()
-					})
-				})
-			}
-
-			t.Run("#End", func(t *testing.T) {
-				t.Run("can be called multiple times", func(t *testing.T) {
-					t.Parallel()
-
-					span := mechanism()
-
-					span.End()
-					span.End()
-				})
-			})
-		})
-	}
-}
-
-type testCtxKey struct{}
diff --git a/exporters/jaeger/internal/internaltest/text_map_carrier.go b/exporters/jaeger/internal/internaltest/text_map_carrier.go
deleted file mode 100644
index 5c70e789765..00000000000
--- a/exporters/jaeger/internal/internaltest/text_map_carrier.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/internaltest/text_map_carrier.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internaltest // import "go.opentelemetry.io/otel/exporters/jaeger/internal/internaltest"
-
-import (
-	"sync"
-	"testing"
-
-	"go.opentelemetry.io/otel/propagation"
-)
-
-// TextMapCarrier is a storage medium for a TextMapPropagator used in testing.
-// The methods of a TextMapCarrier are concurrent safe.
-type TextMapCarrier struct {
-	mtx sync.Mutex
-
-	gets []string
-	sets [][2]string
-	data map[string]string
-}
-
-var _ propagation.TextMapCarrier = (*TextMapCarrier)(nil)
-
-// NewTextMapCarrier returns a new *TextMapCarrier populated with data.
-func NewTextMapCarrier(data map[string]string) *TextMapCarrier {
-	copied := make(map[string]string, len(data))
-	for k, v := range data {
-		copied[k] = v
-	}
-	return &TextMapCarrier{data: copied}
-}
-
-// Keys returns the keys for which this carrier has a value.
-func (c *TextMapCarrier) Keys() []string {
-	c.mtx.Lock()
-	defer c.mtx.Unlock()
-
-	result := make([]string, 0, len(c.data))
-	for k := range c.data {
-		result = append(result, k)
-	}
-	return result
-}
-
-// Get returns the value associated with the passed key.
-func (c *TextMapCarrier) Get(key string) string {
-	c.mtx.Lock()
-	defer c.mtx.Unlock()
-	c.gets = append(c.gets, key)
-	return c.data[key]
-}
-
-// GotKey tests if c.Get has been called for key.
-func (c *TextMapCarrier) GotKey(t *testing.T, key string) bool {
-	c.mtx.Lock()
-	defer c.mtx.Unlock()
-	for _, k := range c.gets {
-		if k == key {
-			return true
-		}
-	}
-	t.Errorf("TextMapCarrier.Get(%q) has not been called", key)
-	return false
-}
-
-// GotN tests if n calls to c.Get have been made.
-func (c *TextMapCarrier) GotN(t *testing.T, n int) bool {
-	c.mtx.Lock()
-	defer c.mtx.Unlock()
-	if len(c.gets) != n {
-		t.Errorf("TextMapCarrier.Get was called %d times, not %d", len(c.gets), n)
-		return false
-	}
-	return true
-}
-
-// Set stores the key-value pair.
-func (c *TextMapCarrier) Set(key, value string) {
-	c.mtx.Lock()
-	defer c.mtx.Unlock()
-	c.sets = append(c.sets, [2]string{key, value})
-	c.data[key] = value
-}
-
-// SetKeyValue tests if c.Set has been called for the key-value pair.
-func (c *TextMapCarrier) SetKeyValue(t *testing.T, key, value string) bool {
-	c.mtx.Lock()
-	defer c.mtx.Unlock()
-	var vals []string
-	for _, pair := range c.sets {
-		if key == pair[0] {
-			if value == pair[1] {
-				return true
-			}
-			vals = append(vals, pair[1])
-		}
-	}
-	if len(vals) > 0 {
-		t.Errorf("TextMapCarrier.Set called with %q and %v values, but not %s", key, vals, value)
-	}
-	t.Errorf("TextMapCarrier.Set(%q,%q) has not been called", key, value)
-	return false
-}
-
-// SetN tests if n calls to c.Set have been made.
-func (c *TextMapCarrier) SetN(t *testing.T, n int) bool {
-	c.mtx.Lock()
-	defer c.mtx.Unlock()
-	if len(c.sets) != n {
-		t.Errorf("TextMapCarrier.Set was called %d times, not %d", len(c.sets), n)
-		return false
-	}
-	return true
-}
-
-// Reset zeros out the recording state and sets the carried values to data.
-func (c *TextMapCarrier) Reset(data map[string]string) {
-	copied := make(map[string]string, len(data))
-	for k, v := range data {
-		copied[k] = v
-	}
-
-	c.mtx.Lock()
-	defer c.mtx.Unlock()
-
-	c.gets = nil
-	c.sets = nil
-	c.data = copied
-}
diff --git a/exporters/jaeger/internal/internaltest/text_map_carrier_test.go b/exporters/jaeger/internal/internaltest/text_map_carrier_test.go
deleted file mode 100644
index faf713cc2d0..00000000000
--- a/exporters/jaeger/internal/internaltest/text_map_carrier_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/internaltest/text_map_carrier_test.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internaltest
-
-import (
-	"reflect"
-	"testing"
-)
-
-var (
-	key, value = "test", "true"
-)
-
-func TestTextMapCarrierKeys(t *testing.T) {
-	tmc := NewTextMapCarrier(map[string]string{key: value})
-	expected, actual := []string{key}, tmc.Keys()
-	if !reflect.DeepEqual(actual, expected) {
-		t.Errorf("expected tmc.Keys() to be %v but it was %v", expected, actual)
-	}
-}
-
-func TestTextMapCarrierGet(t *testing.T) {
-	tmc := NewTextMapCarrier(map[string]string{key: value})
-	tmc.GotN(t, 0)
-	if got := tmc.Get("empty"); got != "" {
-		t.Errorf("TextMapCarrier.Get returned %q for an empty key", got)
-	}
-	tmc.GotKey(t, "empty")
-	tmc.GotN(t, 1)
-	if got := tmc.Get(key); got != value {
-		t.Errorf("TextMapCarrier.Get(%q) returned %q, want %q", key, got, value)
-	}
-	tmc.GotKey(t, key)
-	tmc.GotN(t, 2)
-}
-
-func TestTextMapCarrierSet(t *testing.T) {
-	tmc := NewTextMapCarrier(nil)
-	tmc.SetN(t, 0)
-	tmc.Set(key, value)
-	if got, ok := tmc.data[key]; !ok {
-		t.Errorf("TextMapCarrier.Set(%q,%q) failed to store pair", key, value)
-	} else if got != value {
-		t.Errorf("TextMapCarrier.Set(%q,%q) stored (%q,%q), not (%q,%q)", key, value, key, got, key, value)
-	}
-	tmc.SetKeyValue(t, key, value)
-	tmc.SetN(t, 1)
-}
-
-func TestTextMapCarrierReset(t *testing.T) {
-	tmc := NewTextMapCarrier(map[string]string{key: value})
-	tmc.GotN(t, 0)
-	tmc.SetN(t, 0)
-	tmc.Reset(nil)
-	tmc.GotN(t, 0)
-	tmc.SetN(t, 0)
-	if got := tmc.Get(key); got != "" {
-		t.Error("TextMapCarrier.Reset() failed to clear initial data")
-	}
-	tmc.GotN(t, 1)
-	tmc.GotKey(t, key)
-	tmc.Set(key, value)
-	tmc.SetKeyValue(t, key, value)
-	tmc.SetN(t, 1)
-	tmc.Reset(nil)
-	tmc.GotN(t, 0)
-	tmc.SetN(t, 0)
-	if got := tmc.Get(key); got != "" {
-		t.Error("TextMapCarrier.Reset() failed to clear data")
-	}
-}
diff --git a/exporters/jaeger/internal/internaltest/text_map_propagator.go b/exporters/jaeger/internal/internaltest/text_map_propagator.go
deleted file mode 100644
index c1c22117a06..00000000000
--- a/exporters/jaeger/internal/internaltest/text_map_propagator.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/internaltest/text_map_propagator.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internaltest // import "go.opentelemetry.io/otel/exporters/jaeger/internal/internaltest"
-
-import (
-	"context"
-	"fmt"
-	"strconv"
-	"strings"
-	"testing"
-
-	"go.opentelemetry.io/otel/propagation"
-)
-
-type ctxKeyType string
-
-type state struct {
-	Injections  uint64
-	Extractions uint64
-}
-
-func newState(encoded string) state {
-	if encoded == "" {
-		return state{}
-	}
-	s0, s1, _ := strings.Cut(encoded, ",")
-	injects, _ := strconv.ParseUint(s0, 10, 64)
-	extracts, _ := strconv.ParseUint(s1, 10, 64)
-	return state{
-		Injections:  injects,
-		Extractions: extracts,
-	}
-}
-
-func (s state) String() string {
-	return fmt.Sprintf("%d,%d", s.Injections, s.Extractions)
-}
-
-// TextMapPropagator is a propagation.TextMapPropagator used for testing.
-type TextMapPropagator struct {
-	name   string
-	ctxKey ctxKeyType
-}
-
-var _ propagation.TextMapPropagator = (*TextMapPropagator)(nil)
-
-// NewTextMapPropagator returns a new TextMapPropagator for testing. It will
-// use name as the key it injects into a TextMapCarrier when Inject is called.
-func NewTextMapPropagator(name string) *TextMapPropagator {
-	return &TextMapPropagator{name: name, ctxKey: ctxKeyType(name)}
-}
-
-func (p *TextMapPropagator) stateFromContext(ctx context.Context) state {
-	if v := ctx.Value(p.ctxKey); v != nil {
-		if s, ok := v.(state); ok {
-			return s
-		}
-	}
-	return state{}
-}
-
-func (p *TextMapPropagator) stateFromCarrier(carrier propagation.TextMapCarrier) state {
-	return newState(carrier.Get(p.name))
-}
-
-// Inject sets cross-cutting concerns for p from ctx into carrier.
-func (p *TextMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
-	s := p.stateFromContext(ctx)
-	s.Injections++
-	carrier.Set(p.name, s.String())
-}
-
-// InjectedN tests if p has made n injections to carrier.
-func (p *TextMapPropagator) InjectedN(t *testing.T, carrier *TextMapCarrier, n int) bool {
-	if actual := p.stateFromCarrier(carrier).Injections; actual != uint64(n) {
-		t.Errorf("TextMapPropagator{%q} injected %d times, not %d", p.name, actual, n)
-		return false
-	}
-	return true
-}
-
-// Extract reads cross-cutting concerns for p from carrier into ctx.
-func (p *TextMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context {
-	s := p.stateFromCarrier(carrier)
-	s.Extractions++
-	return context.WithValue(ctx, p.ctxKey, s)
-}
-
-// ExtractedN tests if p has made n extractions from the lineage of ctx.
-// nolint (context is not first arg)
-func (p *TextMapPropagator) ExtractedN(t *testing.T, ctx context.Context, n int) bool {
-	if actual := p.stateFromContext(ctx).Extractions; actual != uint64(n) {
-		t.Errorf("TextMapPropagator{%q} extracted %d time, not %d", p.name, actual, n)
-		return false
-	}
-	return true
-}
-
-// Fields returns the name of p as the key who's value is set with Inject.
-func (p *TextMapPropagator) Fields() []string { return []string{p.name} }
diff --git a/exporters/jaeger/internal/internaltest/text_map_propagator_test.go b/exporters/jaeger/internal/internaltest/text_map_propagator_test.go
deleted file mode 100644
index babcc95fc1b..00000000000
--- a/exporters/jaeger/internal/internaltest/text_map_propagator_test.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/internaltest/text_map_propagator_test.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internaltest
-
-import (
-	"context"
-	"testing"
-)
-
-func TestTextMapPropagatorInjectExtract(t *testing.T) {
-	name := "testing"
-	ctx := context.Background()
-	carrier := NewTextMapCarrier(map[string]string{name: value})
-	propagator := NewTextMapPropagator(name)
-
-	propagator.Inject(ctx, carrier)
-	// Carrier value overridden with state.
-	if carrier.SetKeyValue(t, name, "1,0") {
-		// Ensure nothing has been extracted yet.
-		propagator.ExtractedN(t, ctx, 0)
-		// Test the injection was counted.
-		propagator.InjectedN(t, carrier, 1)
-	}
-
-	ctx = propagator.Extract(ctx, carrier)
-	v := ctx.Value(ctxKeyType(name))
-	if v == nil {
-		t.Error("TextMapPropagator.Extract failed to extract state")
-	}
-	if s, ok := v.(state); !ok {
-		t.Error("TextMapPropagator.Extract did not extract proper state")
-	} else if s.Extractions != 1 {
-		t.Error("TextMapPropagator.Extract did not increment state.Extractions")
-	}
-	if carrier.GotKey(t, name) {
-		// Test the extraction was counted.
-		propagator.ExtractedN(t, ctx, 1)
-		// Ensure no additional injection was recorded.
-		propagator.InjectedN(t, carrier, 1)
-	}
-}
-
-func TestTextMapPropagatorFields(t *testing.T) {
-	name := "testing"
-	propagator := NewTextMapPropagator(name)
-	if got := propagator.Fields(); len(got) != 1 {
-		t.Errorf("TextMapPropagator.Fields returned %d fields, want 1", len(got))
-	} else if got[0] != name {
-		t.Errorf("TextMapPropagator.Fields returned %q, want %q", got[0], name)
-	}
-}
-
-func TestNewStateEmpty(t *testing.T) {
-	if want, got := (state{}), newState(""); got != want {
-		t.Errorf("newState(\"\") returned %v, want %v", got, want)
-	}
-}
diff --git a/exporters/jaeger/internal/matchers/expectation.go b/exporters/jaeger/internal/matchers/expectation.go
deleted file mode 100644
index 8b1ab860867..00000000000
--- a/exporters/jaeger/internal/matchers/expectation.go
+++ /dev/null
@@ -1,310 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/matchers/expectation.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package matchers // import "go.opentelemetry.io/otel/exporters/jaeger/internal/matchers"
-
-import (
-	"fmt"
-	"reflect"
-	"regexp"
-	"runtime/debug"
-	"strings"
-	"testing"
-	"time"
-)
-
-var (
-	stackTracePruneRE = regexp.MustCompile(`runtime\/debug|testing|internal\/matchers`)
-)
-
-type Expectation struct {
-	t      *testing.T
-	actual interface{}
-}
-
-func (e *Expectation) ToEqual(expected interface{}) {
-	e.verifyExpectedNotNil(expected)
-
-	if !reflect.DeepEqual(e.actual, expected) {
-		e.fail(fmt.Sprintf("Expected\n\t%v\nto equal\n\t%v", e.actual, expected))
-	}
-}
-
-func (e *Expectation) NotToEqual(expected interface{}) {
-	e.verifyExpectedNotNil(expected)
-
-	if reflect.DeepEqual(e.actual, expected) {
-		e.fail(fmt.Sprintf("Expected\n\t%v\nnot to equal\n\t%v", e.actual, expected))
-	}
-}
-
-func (e *Expectation) ToBeNil() {
-	if e.actual != nil {
-		e.fail(fmt.Sprintf("Expected\n\t%v\nto be nil", e.actual))
-	}
-}
-
-func (e *Expectation) NotToBeNil() {
-	if e.actual == nil {
-		e.fail(fmt.Sprintf("Expected\n\t%v\nnot to be nil", e.actual))
-	}
-}
-
-func (e *Expectation) ToBeTrue() {
-	switch a := e.actual.(type) {
-	case bool:
-		if !a {
-			e.fail(fmt.Sprintf("Expected\n\t%v\nto be true", e.actual))
-		}
-	default:
-		e.fail(fmt.Sprintf("Cannot check if non-bool value\n\t%v\nis truthy", a))
-	}
-}
-
-func (e *Expectation) ToBeFalse() {
-	switch a := e.actual.(type) {
-	case bool:
-		if a {
-			e.fail(fmt.Sprintf("Expected\n\t%v\nto be false", e.actual))
-		}
-	default:
-		e.fail(fmt.Sprintf("Cannot check if non-bool value\n\t%v\nis truthy", a))
-	}
-}
-
-func (e *Expectation) NotToPanic() {
-	switch a := e.actual.(type) {
-	case func():
-		func() {
-			defer func() {
-				if recovered := recover(); recovered != nil {
-					e.fail(fmt.Sprintf("Expected panic\n\t%v\nto have not been raised", recovered))
-				}
-			}()
-
-			a()
-		}()
-	default:
-		e.fail(fmt.Sprintf("Cannot check if non-func value\n\t%v\nis truthy", a))
-	}
-}
-
-func (e *Expectation) ToSucceed() {
-	switch actual := e.actual.(type) {
-	case error:
-		if actual != nil {
-			e.fail(fmt.Sprintf("Expected error\n\t%v\nto have succeeded", actual))
-		}
-	default:
-		e.fail(fmt.Sprintf("Cannot check if non-error value\n\t%v\nsucceeded", actual))
-	}
-}
-
-func (e *Expectation) ToMatchError(expected interface{}) {
-	e.verifyExpectedNotNil(expected)
-
-	actual, ok := e.actual.(error)
-	if !ok {
-		e.fail(fmt.Sprintf("Cannot check if non-error value\n\t%v\nmatches error", e.actual))
-	}
-
-	switch expected := expected.(type) {
-	case error:
-		if !reflect.DeepEqual(actual, expected) {
-			e.fail(fmt.Sprintf("Expected\n\t%v\nto match error\n\t%v", actual, expected))
-		}
-	case string:
-		if actual.Error() != expected {
-			e.fail(fmt.Sprintf("Expected\n\t%v\nto match error\n\t%v", actual, expected))
-		}
-	default:
-		e.fail(fmt.Sprintf("Cannot match\n\t%v\nagainst non-error\n\t%v", actual, expected))
-	}
-}
-
-func (e *Expectation) ToContain(expected interface{}) {
-	actualValue := reflect.ValueOf(e.actual)
-	actualKind := actualValue.Kind()
-
-	switch actualKind {
-	case reflect.Array, reflect.Slice:
-	default:
-		e.fail(fmt.Sprintf("Expected\n\t%v\nto be an array", e.actual))
-		return
-	}
-
-	expectedValue := reflect.ValueOf(expected)
-	expectedKind := expectedValue.Kind()
-
-	switch expectedKind {
-	case reflect.Array, reflect.Slice:
-	default:
-		expectedValue = reflect.ValueOf([]interface{}{expected})
-	}
-
-	for i := 0; i < expectedValue.Len(); i++ {
-		var contained bool
-		expectedElem := expectedValue.Index(i).Interface()
-
-		for j := 0; j < actualValue.Len(); j++ {
-			if reflect.DeepEqual(actualValue.Index(j).Interface(), expectedElem) {
-				contained = true
-				break
-			}
-		}
-
-		if !contained {
-			e.fail(fmt.Sprintf("Expected\n\t%v\nto contain\n\t%v", e.actual, expectedElem))
-			return
-		}
-	}
-}
-
-func (e *Expectation) NotToContain(expected interface{}) {
-	actualValue := reflect.ValueOf(e.actual)
-	actualKind := actualValue.Kind()
-
-	switch actualKind {
-	case reflect.Array, reflect.Slice:
-	default:
-		e.fail(fmt.Sprintf("Expected\n\t%v\nto be an array", e.actual))
-		return
-	}
-
-	expectedValue := reflect.ValueOf(expected)
-	expectedKind := expectedValue.Kind()
-
-	switch expectedKind {
-	case reflect.Array, reflect.Slice:
-	default:
-		expectedValue = reflect.ValueOf([]interface{}{expected})
-	}
-
-	for i := 0; i < expectedValue.Len(); i++ {
-		expectedElem := expectedValue.Index(i).Interface()
-
-		for j := 0; j < actualValue.Len(); j++ {
-			if reflect.DeepEqual(actualValue.Index(j).Interface(), expectedElem) {
-				e.fail(fmt.Sprintf("Expected\n\t%v\nnot to contain\n\t%v", e.actual, expectedElem))
-				return
-			}
-		}
-	}
-}
-
-func (e *Expectation) ToMatchInAnyOrder(expected interface{}) {
-	expectedValue := reflect.ValueOf(expected)
-	expectedKind := expectedValue.Kind()
-
-	switch expectedKind {
-	case reflect.Array, reflect.Slice:
-	default:
-		e.fail(fmt.Sprintf("Expected\n\t%v\nto be an array", expected))
-		return
-	}
-
-	actualValue := reflect.ValueOf(e.actual)
-	actualKind := actualValue.Kind()
-
-	if actualKind != expectedKind {
-		e.fail(fmt.Sprintf("Expected\n\t%v\nto be the same type as\n\t%v", e.actual, expected))
-		return
-	}
-
-	if actualValue.Len() != expectedValue.Len() {
-		e.fail(fmt.Sprintf("Expected\n\t%v\nto have the same length as\n\t%v", e.actual, expected))
-		return
-	}
-
-	var unmatched []interface{}
-
-	for i := 0; i < expectedValue.Len(); i++ {
-		unmatched = append(unmatched, expectedValue.Index(i).Interface())
-	}
-
-	for i := 0; i < actualValue.Len(); i++ {
-		var found bool
-
-		for j, elem := range unmatched {
-			if reflect.DeepEqual(actualValue.Index(i).Interface(), elem) {
-				found = true
-				unmatched = append(unmatched[:j], unmatched[j+1:]...)
-
-				break
-			}
-		}
-
-		if !found {
-			e.fail(fmt.Sprintf("Expected\n\t%v\nto contain the same elements as\n\t%v", e.actual, expected))
-		}
-	}
-}
-
-func (e *Expectation) ToBeTemporally(matcher TemporalMatcher, compareTo interface{}) {
-	if actual, ok := e.actual.(time.Time); ok {
-		ct, ok := compareTo.(time.Time)
-		if !ok {
-			e.fail(fmt.Sprintf("Cannot compare to non-temporal value\n\t%v", compareTo))
-			return
-		}
-
-		switch matcher {
-		case Before:
-			if !actual.Before(ct) {
-				e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally before\n\t%v", e.actual, compareTo))
-			}
-		case BeforeOrSameTime:
-			if actual.After(ct) {
-				e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally before or at the same time as\n\t%v", e.actual, compareTo))
-			}
-		case After:
-			if !actual.After(ct) {
-				e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally after\n\t%v", e.actual, compareTo))
-			}
-		case AfterOrSameTime:
-			if actual.Before(ct) {
-				e.fail(fmt.Sprintf("Expected\n\t%v\nto be temporally after or at the same time as\n\t%v", e.actual, compareTo))
-			}
-		default:
-			e.fail("Cannot compare times with unexpected temporal matcher")
-		}
-
-		return
-	}
-
-	e.fail(fmt.Sprintf("Cannot compare non-temporal value\n\t%v", e.actual))
-}
-
-func (e *Expectation) verifyExpectedNotNil(expected interface{}) {
-	if expected == nil {
-		e.fail("Refusing to compare with <nil>. Use `ToBeNil` or `NotToBeNil` instead.")
-	}
-}
-
-func (e *Expectation) fail(msg string) {
-	// Prune the stack trace so that it's easier to see relevant lines
-	stack := strings.Split(string(debug.Stack()), "\n")
-	var prunedStack []string
-
-	for _, line := range stack {
-		if !stackTracePruneRE.MatchString(line) {
-			prunedStack = append(prunedStack, line)
-		}
-	}
-
-	e.t.Fatalf("\n%s\n%s\n", strings.Join(prunedStack, "\n"), msg)
-}
diff --git a/exporters/jaeger/internal/matchers/temporal_matcher.go b/exporters/jaeger/internal/matchers/temporal_matcher.go
deleted file mode 100644
index 82871f71089..00000000000
--- a/exporters/jaeger/internal/matchers/temporal_matcher.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/matchers/temporal_matcher.go.tmpl
-
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package matchers // import "go.opentelemetry.io/otel/exporters/jaeger/internal/matchers"
-
-type TemporalMatcher byte
-
-//nolint:revive // ignoring missing comments for unexported constants in an internal package
-const (
-	Before TemporalMatcher = iota
-	BeforeOrSameTime
-	After
-	AfterOrSameTime
-)
diff --git a/exporters/jaeger/internal/third_party/thrift/LICENSE b/exporters/jaeger/internal/third_party/thrift/LICENSE
deleted file mode 100644
index 2bc6fbbf65c..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/LICENSE
+++ /dev/null
@@ -1,306 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
---------------------------------------------------
-SOFTWARE DISTRIBUTED WITH THRIFT:
-
-The Apache Thrift software includes a number of subcomponents with
-separate copyright notices and license terms. Your use of the source
-code for the these subcomponents is subject to the terms and
-conditions of the following licenses.
-
---------------------------------------------------
-Portions of the following files are licensed under the MIT License:
-
-  lib/erl/src/Makefile.am
-
-Please see doc/otp-base-license.txt for the full terms of this license.
-
---------------------------------------------------
-For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
-
-#   Copyright (c) 2007 Thomas Porschberg <thomas@randspringer.de>
-#
-#   Copying and distribution of this file, with or without
-#   modification, are permitted in any medium without royalty provided
-#   the copyright notice and this notice are preserved.
-
---------------------------------------------------
-For the lib/nodejs/lib/thrift/json_parse.js:
-
-/*
-    json_parse.js
-    2015-05-02
-    Public Domain.
-    NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
-
-*/
-(By Douglas Crockford <douglas@crockford.com>)
-
---------------------------------------------------
-For lib/cpp/src/thrift/windows/SocketPair.cpp
-
-/* socketpair.c
- * Copyright 2007 by Nathan C. Myers <ncm@cantrip.org>; some rights reserved.
- * This code is Free Software.  It may be copied freely, in original or
- * modified form, subject only to the restrictions that (1) the author is
- * relieved from all responsibilities for any use for any purpose, and (2)
- * this copyright notice must be retained, unchanged, in its entirety.  If
- * for any reason the author might be held responsible for any consequences
- * of copying or use, license is withheld.
- */
-
-
---------------------------------------------------
-For lib/py/compat/win32/stdint.h
-
-// ISO C9x  compliant stdint.h for Microsoft Visual Studio
-// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
-//
-//  Copyright (c) 2006-2008 Alexander Chemeris
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-//   1. Redistributions of source code must retain the above copyright notice,
-//      this list of conditions and the following disclaimer.
-//
-//   2. Redistributions in binary form must reproduce the above copyright
-//      notice, this list of conditions and the following disclaimer in the
-//      documentation and/or other materials provided with the distribution.
-//
-//   3. The name of the author may be used to endorse or promote products
-//      derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
-// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
-// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-///////////////////////////////////////////////////////////////////////////////
-
-
---------------------------------------------------
-Codegen template in t_html_generator.h
-
-* Bootstrap v2.0.3
-*
-* Copyright 2012 Twitter, Inc
-* Licensed under the Apache License v2.0
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Designed and built with all the love in the world @twitter by @mdo and @fat.
-
----------------------------------------------------
-For t_cl_generator.cc
-
- * Copyright (c) 2008- Patrick Collison <patrick@collison.ie>
- * Copyright (c) 2006- Facebook
-
----------------------------------------------------
diff --git a/exporters/jaeger/internal/third_party/thrift/NOTICE b/exporters/jaeger/internal/third_party/thrift/NOTICE
deleted file mode 100644
index 37824e7fb66..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-Apache Thrift
-Copyright (C) 2006 - 2019, The Apache Software Foundation
-
-This product includes software developed at
-The Apache Software Foundation (http://www.apache.org/).
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go
deleted file mode 100644
index 32d5b0147a2..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/application_exception.go
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"context"
-)
-
-const (
-	UNKNOWN_APPLICATION_EXCEPTION  = 0
-	UNKNOWN_METHOD                 = 1
-	INVALID_MESSAGE_TYPE_EXCEPTION = 2
-	WRONG_METHOD_NAME              = 3
-	BAD_SEQUENCE_ID                = 4
-	MISSING_RESULT                 = 5
-	INTERNAL_ERROR                 = 6
-	PROTOCOL_ERROR                 = 7
-	INVALID_TRANSFORM              = 8
-	INVALID_PROTOCOL               = 9
-	UNSUPPORTED_CLIENT_TYPE        = 10
-)
-
-var defaultApplicationExceptionMessage = map[int32]string{
-	UNKNOWN_APPLICATION_EXCEPTION:  "unknown application exception",
-	UNKNOWN_METHOD:                 "unknown method",
-	INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type",
-	WRONG_METHOD_NAME:              "wrong method name",
-	BAD_SEQUENCE_ID:                "bad sequence ID",
-	MISSING_RESULT:                 "missing result",
-	INTERNAL_ERROR:                 "unknown internal error",
-	PROTOCOL_ERROR:                 "unknown protocol error",
-	INVALID_TRANSFORM:              "Invalid transform",
-	INVALID_PROTOCOL:               "Invalid protocol",
-	UNSUPPORTED_CLIENT_TYPE:        "Unsupported client type",
-}
-
-// Application level Thrift exception
-type TApplicationException interface {
-	TException
-	TypeId() int32
-	Read(ctx context.Context, iprot TProtocol) error
-	Write(ctx context.Context, oprot TProtocol) error
-}
-
-type tApplicationException struct {
-	message string
-	type_   int32
-}
-
-var _ TApplicationException = (*tApplicationException)(nil)
-
-func (tApplicationException) TExceptionType() TExceptionType {
-	return TExceptionTypeApplication
-}
-
-func (e tApplicationException) Error() string {
-	if e.message != "" {
-		return e.message
-	}
-	return defaultApplicationExceptionMessage[e.type_]
-}
-
-func NewTApplicationException(type_ int32, message string) TApplicationException {
-	return &tApplicationException{message, type_}
-}
-
-func (p *tApplicationException) TypeId() int32 {
-	return p.type_
-}
-
-func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error {
-	// TODO: this should really be generated by the compiler
-	_, err := iprot.ReadStructBegin(ctx)
-	if err != nil {
-		return err
-	}
-
-	message := ""
-	type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)
-
-	for {
-		_, ttype, id, err := iprot.ReadFieldBegin(ctx)
-		if err != nil {
-			return err
-		}
-		if ttype == STOP {
-			break
-		}
-		switch id {
-		case 1:
-			if ttype == STRING {
-				if message, err = iprot.ReadString(ctx); err != nil {
-					return err
-				}
-			} else {
-				if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
-					return err
-				}
-			}
-		case 2:
-			if ttype == I32 {
-				if type_, err = iprot.ReadI32(ctx); err != nil {
-					return err
-				}
-			} else {
-				if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
-					return err
-				}
-			}
-		default:
-			if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
-				return err
-			}
-		}
-		if err = iprot.ReadFieldEnd(ctx); err != nil {
-			return err
-		}
-	}
-	if err := iprot.ReadStructEnd(ctx); err != nil {
-		return err
-	}
-
-	p.message = message
-	p.type_ = type_
-
-	return nil
-}
-
-func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) {
-	err = oprot.WriteStructBegin(ctx, "TApplicationException")
-	if len(p.Error()) > 0 {
-		err = oprot.WriteFieldBegin(ctx, "message", STRING, 1)
-		if err != nil {
-			return
-		}
-		err = oprot.WriteString(ctx, p.Error())
-		if err != nil {
-			return
-		}
-		err = oprot.WriteFieldEnd(ctx)
-		if err != nil {
-			return
-		}
-	}
-	err = oprot.WriteFieldBegin(ctx, "type", I32, 2)
-	if err != nil {
-		return
-	}
-	err = oprot.WriteI32(ctx, p.type_)
-	if err != nil {
-		return
-	}
-	err = oprot.WriteFieldEnd(ctx)
-	if err != nil {
-		return
-	}
-	err = oprot.WriteFieldStop(ctx)
-	if err != nil {
-		return
-	}
-	err = oprot.WriteStructEnd(ctx)
-	return
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go
deleted file mode 100644
index 45c880d32f8..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/binary_protocol.go
+++ /dev/null
@@ -1,555 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"bytes"
-	"context"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"math"
-)
-
-type TBinaryProtocol struct {
-	trans         TRichTransport
-	origTransport TTransport
-	cfg           *TConfiguration
-	buffer        [64]byte
-}
-
-type TBinaryProtocolFactory struct {
-	cfg *TConfiguration
-}
-
-// Deprecated: Use NewTBinaryProtocolConf instead.
-func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
-	return NewTBinaryProtocolConf(t, &TConfiguration{
-		noPropagation: true,
-	})
-}
-
-// Deprecated: Use NewTBinaryProtocolConf instead.
-func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
-	return NewTBinaryProtocolConf(t, &TConfiguration{
-		TBinaryStrictRead:  &strictRead,
-		TBinaryStrictWrite: &strictWrite,
-
-		noPropagation: true,
-	})
-}
-
-func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol {
-	PropagateTConfiguration(t, conf)
-	p := &TBinaryProtocol{
-		origTransport: t,
-		cfg:           conf,
-	}
-	if et, ok := t.(TRichTransport); ok {
-		p.trans = et
-	} else {
-		p.trans = NewTRichTransport(t)
-	}
-	return p
-}
-
-// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
-func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
-	return NewTBinaryProtocolFactoryConf(&TConfiguration{
-		noPropagation: true,
-	})
-}
-
-// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
-func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
-	return NewTBinaryProtocolFactoryConf(&TConfiguration{
-		TBinaryStrictRead:  &strictRead,
-		TBinaryStrictWrite: &strictWrite,
-
-		noPropagation: true,
-	})
-}
-
-func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory {
-	return &TBinaryProtocolFactory{
-		cfg: conf,
-	}
-}
-
-func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
-	return NewTBinaryProtocolConf(t, p.cfg)
-}
-
-func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) {
-	p.cfg = conf
-}
-
-/**
- * Writing Methods
- */
-
-func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
-	if p.cfg.GetTBinaryStrictWrite() {
-		version := uint32(VERSION_1) | uint32(typeId)
-		e := p.WriteI32(ctx, int32(version))
-		if e != nil {
-			return e
-		}
-		e = p.WriteString(ctx, name)
-		if e != nil {
-			return e
-		}
-		e = p.WriteI32(ctx, seqId)
-		return e
-	} else {
-		e := p.WriteString(ctx, name)
-		if e != nil {
-			return e
-		}
-		e = p.WriteByte(ctx, int8(typeId))
-		if e != nil {
-			return e
-		}
-		e = p.WriteI32(ctx, seqId)
-		return e
-	}
-	return nil
-}
-
-func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error {
-	return nil
-}
-
-func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error {
-	return nil
-}
-
-func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error {
-	return nil
-}
-
-func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
-	e := p.WriteByte(ctx, int8(typeId))
-	if e != nil {
-		return e
-	}
-	e = p.WriteI16(ctx, id)
-	return e
-}
-
-func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error {
-	return nil
-}
-
-func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error {
-	e := p.WriteByte(ctx, STOP)
-	return e
-}
-
-func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
-	e := p.WriteByte(ctx, int8(keyType))
-	if e != nil {
-		return e
-	}
-	e = p.WriteByte(ctx, int8(valueType))
-	if e != nil {
-		return e
-	}
-	e = p.WriteI32(ctx, int32(size))
-	return e
-}
-
-func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error {
-	return nil
-}
-
-func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
-	e := p.WriteByte(ctx, int8(elemType))
-	if e != nil {
-		return e
-	}
-	e = p.WriteI32(ctx, int32(size))
-	return e
-}
-
-func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error {
-	return nil
-}
-
-func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
-	e := p.WriteByte(ctx, int8(elemType))
-	if e != nil {
-		return e
-	}
-	e = p.WriteI32(ctx, int32(size))
-	return e
-}
-
-func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error {
-	return nil
-}
-
-func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error {
-	if value {
-		return p.WriteByte(ctx, 1)
-	}
-	return p.WriteByte(ctx, 0)
-}
-
-func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error {
-	e := p.trans.WriteByte(byte(value))
-	return NewTProtocolException(e)
-}
-
-func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error {
-	v := p.buffer[0:2]
-	binary.BigEndian.PutUint16(v, uint16(value))
-	_, e := p.trans.Write(v)
-	return NewTProtocolException(e)
-}
-
-func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error {
-	v := p.buffer[0:4]
-	binary.BigEndian.PutUint32(v, uint32(value))
-	_, e := p.trans.Write(v)
-	return NewTProtocolException(e)
-}
-
-func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error {
-	v := p.buffer[0:8]
-	binary.BigEndian.PutUint64(v, uint64(value))
-	_, err := p.trans.Write(v)
-	return NewTProtocolException(err)
-}
-
-func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error {
-	return p.WriteI64(ctx, int64(math.Float64bits(value)))
-}
-
-func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error {
-	e := p.WriteI32(ctx, int32(len(value)))
-	if e != nil {
-		return e
-	}
-	_, err := p.trans.WriteString(value)
-	return NewTProtocolException(err)
-}
-
-func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error {
-	e := p.WriteI32(ctx, int32(len(value)))
-	if e != nil {
-		return e
-	}
-	_, err := p.trans.Write(value)
-	return NewTProtocolException(err)
-}
-
-/**
- * Reading methods
- */
-
-func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
-	size, e := p.ReadI32(ctx)
-	if e != nil {
-		return "", typeId, 0, NewTProtocolException(e)
-	}
-	if size < 0 {
-		typeId = TMessageType(size & 0x0ff)
-		version := int64(int64(size) & VERSION_MASK)
-		if version != VERSION_1 {
-			return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
-		}
-		name, e = p.ReadString(ctx)
-		if e != nil {
-			return name, typeId, seqId, NewTProtocolException(e)
-		}
-		seqId, e = p.ReadI32(ctx)
-		if e != nil {
-			return name, typeId, seqId, NewTProtocolException(e)
-		}
-		return name, typeId, seqId, nil
-	}
-	if p.cfg.GetTBinaryStrictRead() {
-		return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
-	}
-	name, e2 := p.readStringBody(size)
-	if e2 != nil {
-		return name, typeId, seqId, e2
-	}
-	b, e3 := p.ReadByte(ctx)
-	if e3 != nil {
-		return name, typeId, seqId, e3
-	}
-	typeId = TMessageType(b)
-	seqId, e4 := p.ReadI32(ctx)
-	if e4 != nil {
-		return name, typeId, seqId, e4
-	}
-	return name, typeId, seqId, nil
-}
-
-func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error {
-	return nil
-}
-
-func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
-	return
-}
-
-func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error {
-	return nil
-}
-
-func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, seqId int16, err error) {
-	t, err := p.ReadByte(ctx)
-	typeId = TType(t)
-	if err != nil {
-		return name, typeId, seqId, err
-	}
-	if t != STOP {
-		seqId, err = p.ReadI16(ctx)
-	}
-	return name, typeId, seqId, err
-}
-
-func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error {
-	return nil
-}
-
-var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
-
-func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) {
-	k, e := p.ReadByte(ctx)
-	if e != nil {
-		err = NewTProtocolException(e)
-		return
-	}
-	kType = TType(k)
-	v, e := p.ReadByte(ctx)
-	if e != nil {
-		err = NewTProtocolException(e)
-		return
-	}
-	vType = TType(v)
-	size32, e := p.ReadI32(ctx)
-	if e != nil {
-		err = NewTProtocolException(e)
-		return
-	}
-	if size32 < 0 {
-		err = invalidDataLength
-		return
-	}
-	size = int(size32)
-	return kType, vType, size, nil
-}
-
-func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error {
-	return nil
-}
-
-func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
-	b, e := p.ReadByte(ctx)
-	if e != nil {
-		err = NewTProtocolException(e)
-		return
-	}
-	elemType = TType(b)
-	size32, e := p.ReadI32(ctx)
-	if e != nil {
-		err = NewTProtocolException(e)
-		return
-	}
-	if size32 < 0 {
-		err = invalidDataLength
-		return
-	}
-	size = int(size32)
-
-	return
-}
-
-func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error {
-	return nil
-}
-
-func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
-	b, e := p.ReadByte(ctx)
-	if e != nil {
-		err = NewTProtocolException(e)
-		return
-	}
-	elemType = TType(b)
-	size32, e := p.ReadI32(ctx)
-	if e != nil {
-		err = NewTProtocolException(e)
-		return
-	}
-	if size32 < 0 {
-		err = invalidDataLength
-		return
-	}
-	size = int(size32)
-	return elemType, size, nil
-}
-
-func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error {
-	return nil
-}
-
-func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) {
-	b, e := p.ReadByte(ctx)
-	v := true
-	if b != 1 {
-		v = false
-	}
-	return v, e
-}
-
-func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) {
-	v, err := p.trans.ReadByte()
-	return int8(v), err
-}
-
-func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) {
-	buf := p.buffer[0:2]
-	err = p.readAll(ctx, buf)
-	value = int16(binary.BigEndian.Uint16(buf))
-	return value, err
-}
-
-func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) {
-	buf := p.buffer[0:4]
-	err = p.readAll(ctx, buf)
-	value = int32(binary.BigEndian.Uint32(buf))
-	return value, err
-}
-
-func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) {
-	buf := p.buffer[0:8]
-	err = p.readAll(ctx, buf)
-	value = int64(binary.BigEndian.Uint64(buf))
-	return value, err
-}
-
-func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
-	buf := p.buffer[0:8]
-	err = p.readAll(ctx, buf)
-	value = math.Float64frombits(binary.BigEndian.Uint64(buf))
-	return value, err
-}
-
-func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) {
-	size, e := p.ReadI32(ctx)
-	if e != nil {
-		return "", e
-	}
-	err = checkSizeForProtocol(size, p.cfg)
-	if err != nil {
-		return
-	}
-	if size < 0 {
-		err = invalidDataLength
-		return
-	}
-	if size == 0 {
-		return "", nil
-	}
-	if size < int32(len(p.buffer)) {
-		// Avoid allocation on small reads
-		buf := p.buffer[:size]
-		read, e := io.ReadFull(p.trans, buf)
-		return string(buf[:read]), NewTProtocolException(e)
-	}
-
-	return p.readStringBody(size)
-}
-
-func (p *TBinaryProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
-	size, e := p.ReadI32(ctx)
-	if e != nil {
-		return nil, e
-	}
-	if err := checkSizeForProtocol(size, p.cfg); err != nil {
-		return nil, err
-	}
-
-	buf, err := safeReadBytes(size, p.trans)
-	return buf, NewTProtocolException(err)
-}
-
-func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) {
-	return NewTProtocolException(p.trans.Flush(ctx))
-}
-
-func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
-	return SkipDefaultDepth(ctx, p, fieldType)
-}
-
-func (p *TBinaryProtocol) Transport() TTransport {
-	return p.origTransport
-}
-
-func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) {
-	var read int
-	_, deadlineSet := ctx.Deadline()
-	for {
-		read, err = io.ReadFull(p.trans, buf)
-		if deadlineSet && read == 0 && isTimeoutError(err) && ctx.Err() == nil {
-			// This is I/O timeout without anything read,
-			// and we still have time left, keep retrying.
-			continue
-		}
-		// For anything else, don't retry
-		break
-	}
-	return NewTProtocolException(err)
-}
-
-func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
-	buf, err := safeReadBytes(size, p.trans)
-	return string(buf), NewTProtocolException(err)
-}
-
-func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) {
-	PropagateTConfiguration(p.trans, conf)
-	PropagateTConfiguration(p.origTransport, conf)
-	p.cfg = conf
-}
-
-var (
-	_ TConfigurationSetter = (*TBinaryProtocolFactory)(nil)
-	_ TConfigurationSetter = (*TBinaryProtocol)(nil)
-)
-
-// This function is shared between TBinaryProtocol and TCompactProtocol.
-//
-// It tries to read size bytes from trans, in a way that prevents large
-// allocations when size is insanely large (mostly caused by malformed message).
-func safeReadBytes(size int32, trans io.Reader) ([]byte, error) {
-	if size < 0 {
-		return nil, nil
-	}
-
-	buf := new(bytes.Buffer)
-	_, err := io.CopyN(buf, trans, int64(size))
-	return buf.Bytes(), err
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go
deleted file mode 100644
index aa551b4ab37..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/buffered_transport.go
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"bufio"
-	"context"
-)
-
-type TBufferedTransportFactory struct {
-	size int
-}
-
-type TBufferedTransport struct {
-	bufio.ReadWriter
-	tp TTransport
-}
-
-func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
-	return NewTBufferedTransport(trans, p.size), nil
-}
-
-func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory {
-	return &TBufferedTransportFactory{size: bufferSize}
-}
-
-func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport {
-	return &TBufferedTransport{
-		ReadWriter: bufio.ReadWriter{
-			Reader: bufio.NewReaderSize(trans, bufferSize),
-			Writer: bufio.NewWriterSize(trans, bufferSize),
-		},
-		tp: trans,
-	}
-}
-
-func (p *TBufferedTransport) IsOpen() bool {
-	return p.tp.IsOpen()
-}
-
-func (p *TBufferedTransport) Open() (err error) {
-	return p.tp.Open()
-}
-
-func (p *TBufferedTransport) Close() (err error) {
-	return p.tp.Close()
-}
-
-func (p *TBufferedTransport) Read(b []byte) (int, error) {
-	n, err := p.ReadWriter.Read(b)
-	if err != nil {
-		p.ReadWriter.Reader.Reset(p.tp)
-	}
-	return n, err
-}
-
-func (p *TBufferedTransport) Write(b []byte) (int, error) {
-	n, err := p.ReadWriter.Write(b)
-	if err != nil {
-		p.ReadWriter.Writer.Reset(p.tp)
-	}
-	return n, err
-}
-
-func (p *TBufferedTransport) Flush(ctx context.Context) error {
-	if err := p.ReadWriter.Flush(); err != nil {
-		p.ReadWriter.Writer.Reset(p.tp)
-		return err
-	}
-	return p.tp.Flush(ctx)
-}
-
-func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) {
-	return p.tp.RemainingBytes()
-}
-
-// SetTConfiguration implements TConfigurationSetter for propagation.
-func (p *TBufferedTransport) SetTConfiguration(conf *TConfiguration) {
-	PropagateTConfiguration(p.tp, conf)
-}
-
-var _ TConfigurationSetter = (*TBufferedTransport)(nil)
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go
deleted file mode 100644
index ea2c01fdadb..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/client.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package thrift
-
-import (
-	"context"
-	"fmt"
-)
-
-// ResponseMeta represents the metadata attached to the response.
-type ResponseMeta struct {
-	// The headers in the response, if any.
-	// If the underlying transport/protocol is not THeader, this will always be nil.
-	Headers THeaderMap
-}
-
-type TClient interface {
-	Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error)
-}
-
-type TStandardClient struct {
-	seqId        int32
-	iprot, oprot TProtocol
-}
-
-// TStandardClient implements TClient, and uses the standard message format for Thrift.
-// It is not safe for concurrent use.
-func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient {
-	return &TStandardClient{
-		iprot: inputProtocol,
-		oprot: outputProtocol,
-	}
-}
-
-func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error {
-	// Set headers from context object on THeaderProtocol
-	if headerProt, ok := oprot.(*THeaderProtocol); ok {
-		headerProt.ClearWriteHeaders()
-		for _, key := range GetWriteHeaderList(ctx) {
-			if value, ok := GetHeader(ctx, key); ok {
-				headerProt.SetWriteHeader(key, value)
-			}
-		}
-	}
-
-	if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil {
-		return err
-	}
-	if err := args.Write(ctx, oprot); err != nil {
-		return err
-	}
-	if err := oprot.WriteMessageEnd(ctx); err != nil {
-		return err
-	}
-	return oprot.Flush(ctx)
-}
-
-func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error {
-	rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx)
-	if err != nil {
-		return err
-	}
-
-	if method != rMethod {
-		return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method))
-	} else if seqId != rSeqId {
-		return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method))
-	} else if rTypeId == EXCEPTION {
-		var exception tApplicationException
-		if err := exception.Read(ctx, iprot); err != nil {
-			return err
-		}
-
-		if err := iprot.ReadMessageEnd(ctx); err != nil {
-			return err
-		}
-
-		return &exception
-	} else if rTypeId != REPLY {
-		return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method))
-	}
-
-	if err := result.Read(ctx, iprot); err != nil {
-		return err
-	}
-
-	return iprot.ReadMessageEnd(ctx)
-}
-
-func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) {
-	p.seqId++
-	seqId := p.seqId
-
-	if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil {
-		return ResponseMeta{}, err
-	}
-
-	// method is oneway
-	if result == nil {
-		return ResponseMeta{}, nil
-	}
-
-	err := p.Recv(ctx, p.iprot, seqId, method, result)
-	var headers THeaderMap
-	if hp, ok := p.iprot.(*THeaderProtocol); ok {
-		headers = hp.transport.readHeaders
-	}
-	return ResponseMeta{
-		Headers: headers,
-	}, err
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go
deleted file mode 100644
index a49225dabfb..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/compact_protocol.go
+++ /dev/null
@@ -1,865 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"context"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"math"
-)
-
-const (
-	COMPACT_PROTOCOL_ID       = 0x082
-	COMPACT_VERSION           = 1
-	COMPACT_VERSION_MASK      = 0x1f
-	COMPACT_TYPE_MASK         = 0x0E0
-	COMPACT_TYPE_BITS         = 0x07
-	COMPACT_TYPE_SHIFT_AMOUNT = 5
-)
-
-type tCompactType byte
-
-const (
-	COMPACT_BOOLEAN_TRUE  = 0x01
-	COMPACT_BOOLEAN_FALSE = 0x02
-	COMPACT_BYTE          = 0x03
-	COMPACT_I16           = 0x04
-	COMPACT_I32           = 0x05
-	COMPACT_I64           = 0x06
-	COMPACT_DOUBLE        = 0x07
-	COMPACT_BINARY        = 0x08
-	COMPACT_LIST          = 0x09
-	COMPACT_SET           = 0x0A
-	COMPACT_MAP           = 0x0B
-	COMPACT_STRUCT        = 0x0C
-)
-
-var (
-	ttypeToCompactType map[TType]tCompactType
-)
-
-func init() {
-	ttypeToCompactType = map[TType]tCompactType{
-		STOP:   STOP,
-		BOOL:   COMPACT_BOOLEAN_TRUE,
-		BYTE:   COMPACT_BYTE,
-		I16:    COMPACT_I16,
-		I32:    COMPACT_I32,
-		I64:    COMPACT_I64,
-		DOUBLE: COMPACT_DOUBLE,
-		STRING: COMPACT_BINARY,
-		LIST:   COMPACT_LIST,
-		SET:    COMPACT_SET,
-		MAP:    COMPACT_MAP,
-		STRUCT: COMPACT_STRUCT,
-	}
-}
-
-type TCompactProtocolFactory struct {
-	cfg *TConfiguration
-}
-
-// Deprecated: Use NewTCompactProtocolFactoryConf instead.
-func NewTCompactProtocolFactory() *TCompactProtocolFactory {
-	return NewTCompactProtocolFactoryConf(&TConfiguration{
-		noPropagation: true,
-	})
-}
-
-func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory {
-	return &TCompactProtocolFactory{
-		cfg: conf,
-	}
-}
-
-func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
-	return NewTCompactProtocolConf(trans, p.cfg)
-}
-
-func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) {
-	p.cfg = conf
-}
-
-type TCompactProtocol struct {
-	trans         TRichTransport
-	origTransport TTransport
-
-	cfg *TConfiguration
-
-	// Used to keep track of the last field for the current and previous structs,
-	// so we can do the delta stuff.
-	lastField   []int
-	lastFieldId int
-
-	// If we encounter a boolean field begin, save the TField here so it can
-	// have the value incorporated.
-	booleanFieldName    string
-	booleanFieldId      int16
-	booleanFieldPending bool
-
-	// If we read a field header, and it's a boolean field, save the boolean
-	// value here so that readBool can use it.
-	boolValue          bool
-	boolValueIsNotNull bool
-	buffer             [64]byte
-}
-
-// Deprecated: Use NewTCompactProtocolConf instead.
-func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
-	return NewTCompactProtocolConf(trans, &TConfiguration{
-		noPropagation: true,
-	})
-}
-
-func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol {
-	PropagateTConfiguration(trans, conf)
-	p := &TCompactProtocol{
-		origTransport: trans,
-		cfg:           conf,
-	}
-	if et, ok := trans.(TRichTransport); ok {
-		p.trans = et
-	} else {
-		p.trans = NewTRichTransport(trans)
-	}
-
-	return p
-}
-
-//
-// Public Writing methods.
-//
-
-// Write a message header to the wire. Compact Protocol messages contain the
-// protocol version so we can migrate forwards in the future if need be.
-func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
-	err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
-	if err != nil {
-		return NewTProtocolException(err)
-	}
-	err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK))
-	if err != nil {
-		return NewTProtocolException(err)
-	}
-	_, err = p.writeVarint32(seqid)
-	if err != nil {
-		return NewTProtocolException(err)
-	}
-	e := p.WriteString(ctx, name)
-	return e
-
-}
-
-func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil }
-
-// Write a struct begin. This doesn't actually put anything on the wire. We
-// use it as an opportunity to put special placeholder markers on the field
-// stack so we can get the field id deltas correct.
-func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error {
-	p.lastField = append(p.lastField, p.lastFieldId)
-	p.lastFieldId = 0
-	return nil
-}
-
-// Write a struct end. This doesn't actually put anything on the wire. We use
-// this as an opportunity to pop the last field from the current struct off
-// of the field stack.
-func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error {
-	if len(p.lastField) <= 0 {
-		return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before"))
-	}
-	p.lastFieldId = p.lastField[len(p.lastField)-1]
-	p.lastField = p.lastField[:len(p.lastField)-1]
-	return nil
-}
-
-func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
-	if typeId == BOOL {
-		// we want to possibly include the value, so we'll wait.
-		p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
-		return nil
-	}
-	_, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF)
-	return NewTProtocolException(err)
-}
-
-// The workhorse of writeFieldBegin. It has the option of doing a
-// 'type override' of the type header. This is used specifically in the
-// boolean field case.
-func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) {
-	// short lastField = lastField_.pop();
-
-	// if there's a type override, use that.
-	var typeToWrite byte
-	if typeOverride == 0xFF {
-		typeToWrite = byte(p.getCompactType(typeId))
-	} else {
-		typeToWrite = typeOverride
-	}
-	// check if we can use delta encoding for the field id
-	fieldId := int(id)
-	written := 0
-	if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 {
-		// write them together
-		err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite)
-		if err != nil {
-			return 0, err
-		}
-	} else {
-		// write them separate
-		err := p.writeByteDirect(typeToWrite)
-		if err != nil {
-			return 0, err
-		}
-		err = p.WriteI16(ctx, id)
-		written = 1 + 2
-		if err != nil {
-			return 0, err
-		}
-	}
-
-	p.lastFieldId = fieldId
-	return written, nil
-}
-
-func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil }
-
-func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error {
-	err := p.writeByteDirect(STOP)
-	return NewTProtocolException(err)
-}
-
-func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
-	if size == 0 {
-		err := p.writeByteDirect(0)
-		return NewTProtocolException(err)
-	}
-	_, err := p.writeVarint32(int32(size))
-	if err != nil {
-		return NewTProtocolException(err)
-	}
-	err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType)))
-	return NewTProtocolException(err)
-}
-
-func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil }
-
-// Write a list header.
-func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
-	_, err := p.writeCollectionBegin(elemType, size)
-	return NewTProtocolException(err)
-}
-
-func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil }
-
-// Write a set header.
-func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
-	_, err := p.writeCollectionBegin(elemType, size)
-	return NewTProtocolException(err)
-}
-
-func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil }
-
-func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error {
-	v := byte(COMPACT_BOOLEAN_FALSE)
-	if value {
-		v = byte(COMPACT_BOOLEAN_TRUE)
-	}
-	if p.booleanFieldPending {
-		// we haven't written the field header yet
-		_, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v)
-		p.booleanFieldPending = false
-		return NewTProtocolException(err)
-	}
-	// we're not part of a field, so just write the value.
-	err := p.writeByteDirect(v)
-	return NewTProtocolException(err)
-}
-
-// Write a byte. Nothing to see here!
-func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error {
-	err := p.writeByteDirect(byte(value))
-	return NewTProtocolException(err)
-}
-
-// Write an I16 as a zigzag varint.
-func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error {
-	_, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
-	return NewTProtocolException(err)
-}
-
-// Write an i32 as a zigzag varint.
-func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error {
-	_, err := p.writeVarint32(p.int32ToZigzag(value))
-	return NewTProtocolException(err)
-}
-
-// Write an i64 as a zigzag varint.
-func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error {
-	_, err := p.writeVarint64(p.int64ToZigzag(value))
-	return NewTProtocolException(err)
-}
-
-// Write a double to the wire as 8 bytes.
-func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error {
-	buf := p.buffer[0:8]
-	binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
-	_, err := p.trans.Write(buf)
-	return NewTProtocolException(err)
-}
-
-// Write a string to the wire with a varint size preceding.
-func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error {
-	_, e := p.writeVarint32(int32(len(value)))
-	if e != nil {
-		return NewTProtocolException(e)
-	}
-	if len(value) > 0 {
-	}
-	_, e = p.trans.WriteString(value)
-	return e
-}
-
-// Write a byte array, using a varint for the size.
-func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error {
-	_, e := p.writeVarint32(int32(len(bin)))
-	if e != nil {
-		return NewTProtocolException(e)
-	}
-	if len(bin) > 0 {
-		_, e = p.trans.Write(bin)
-		return NewTProtocolException(e)
-	}
-	return nil
-}
-
-//
-// Reading methods.
-//
-
-// Read a message header.
-func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
-	var protocolId byte
-
-	_, deadlineSet := ctx.Deadline()
-	for {
-		protocolId, err = p.readByteDirect()
-		if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
-			// keep retrying I/O timeout errors since we still have
-			// time left
-			continue
-		}
-		// For anything else, don't retry
-		break
-	}
-	if err != nil {
-		return
-	}
-
-	if protocolId != COMPACT_PROTOCOL_ID {
-		e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId)
-		return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e)
-	}
-
-	versionAndType, err := p.readByteDirect()
-	if err != nil {
-		return
-	}
-
-	version := versionAndType & COMPACT_VERSION_MASK
-	typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS)
-	if version != COMPACT_VERSION {
-		e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version)
-		err = NewTProtocolExceptionWithType(BAD_VERSION, e)
-		return
-	}
-	seqId, e := p.readVarint32()
-	if e != nil {
-		err = NewTProtocolException(e)
-		return
-	}
-	name, err = p.ReadString(ctx)
-	return
-}
-
-func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil }
-
-// Read a struct begin. There's nothing on the wire for this, but it is our
-// opportunity to push a new struct begin marker onto the field stack.
-func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
-	p.lastField = append(p.lastField, p.lastFieldId)
-	p.lastFieldId = 0
-	return
-}
-
-// Doesn't actually consume any wire data, just removes the last field for
-// this struct from the field stack.
-func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error {
-	// consume the last field we read off the wire.
-	if len(p.lastField) <= 0 {
-		return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before"))
-	}
-	p.lastFieldId = p.lastField[len(p.lastField)-1]
-	p.lastField = p.lastField[:len(p.lastField)-1]
-	return nil
-}
-
-// Read a field header off the wire.
-func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) {
-	t, err := p.readByteDirect()
-	if err != nil {
-		return
-	}
-
-	// if it's a stop, then we can return immediately, as the struct is over.
-	if (t & 0x0f) == STOP {
-		return "", STOP, 0, nil
-	}
-
-	// mask off the 4 MSB of the type header. it could contain a field id delta.
-	modifier := int16((t & 0xf0) >> 4)
-	if modifier == 0 {
-		// not a delta. look ahead for the zigzag varint field id.
-		id, err = p.ReadI16(ctx)
-		if err != nil {
-			return
-		}
-	} else {
-		// has a delta. add the delta to the last read field id.
-		id = int16(p.lastFieldId) + modifier
-	}
-	typeId, e := p.getTType(tCompactType(t & 0x0f))
-	if e != nil {
-		err = NewTProtocolException(e)
-		return
-	}
-
-	// if this happens to be a boolean field, the value is encoded in the type
-	if p.isBoolType(t) {
-		// save the boolean value in a special instance variable.
-		p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE)
-		p.boolValueIsNotNull = true
-	}
-
-	// push the new field onto the field stack so we can keep the deltas going.
-	p.lastFieldId = int(id)
-	return
-}
-
-func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil }
-
-// Read a map header off the wire. If the size is zero, skip reading the key
-// and value type. This means that 0-length maps will yield TMaps without the
-// "correct" types.
-func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
-	size32, e := p.readVarint32()
-	if e != nil {
-		err = NewTProtocolException(e)
-		return
-	}
-	if size32 < 0 {
-		err = invalidDataLength
-		return
-	}
-	size = int(size32)
-
-	keyAndValueType := byte(STOP)
-	if size != 0 {
-		keyAndValueType, err = p.readByteDirect()
-		if err != nil {
-			return
-		}
-	}
-	keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4))
-	valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf))
-	return
-}
-
-func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil }
-
-// Read a list header off the wire. If the list size is 0-14, the size will
-// be packed into the element type header. If it's a longer list, the 4 MSB
-// of the element type header will be 0xF, and a varint will follow with the
-// true size.
-func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
-	size_and_type, err := p.readByteDirect()
-	if err != nil {
-		return
-	}
-	size = int((size_and_type >> 4) & 0x0f)
-	if size == 15 {
-		size2, e := p.readVarint32()
-		if e != nil {
-			err = NewTProtocolException(e)
-			return
-		}
-		if size2 < 0 {
-			err = invalidDataLength
-			return
-		}
-		size = int(size2)
-	}
-	elemType, e := p.getTType(tCompactType(size_and_type))
-	if e != nil {
-		err = NewTProtocolException(e)
-		return
-	}
-	return
-}
-
-func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil }
-
-// Read a set header off the wire. If the set size is 0-14, the size will
-// be packed into the element type header. If it's a longer set, the 4 MSB
-// of the element type header will be 0xF, and a varint will follow with the
-// true size.
-func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
-	return p.ReadListBegin(ctx)
-}
-
-func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil }
-
-// Read a boolean off the wire. If this is a boolean field, the value should
-// already have been read during readFieldBegin, so we'll just consume the
-// pre-stored value. Otherwise, read a byte.
-func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) {
-	if p.boolValueIsNotNull {
-		p.boolValueIsNotNull = false
-		return p.boolValue, nil
-	}
-	v, err := p.readByteDirect()
-	return v == COMPACT_BOOLEAN_TRUE, err
-}
-
-// Read a single byte off the wire. Nothing interesting here.
-func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) {
-	v, err := p.readByteDirect()
-	if err != nil {
-		return 0, NewTProtocolException(err)
-	}
-	return int8(v), err
-}
-
-// Read an i16 from the wire as a zigzag varint.
-func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) {
-	v, err := p.ReadI32(ctx)
-	return int16(v), err
-}
-
-// Read an i32 from the wire as a zigzag varint.
-func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) {
-	v, e := p.readVarint32()
-	if e != nil {
-		return 0, NewTProtocolException(e)
-	}
-	value = p.zigzagToInt32(v)
-	return value, nil
-}
-
-// Read an i64 from the wire as a zigzag varint.
-func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) {
-	v, e := p.readVarint64()
-	if e != nil {
-		return 0, NewTProtocolException(e)
-	}
-	value = p.zigzagToInt64(v)
-	return value, nil
-}
-
-// No magic here - just read a double off the wire.
-func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
-	longBits := p.buffer[0:8]
-	_, e := io.ReadFull(p.trans, longBits)
-	if e != nil {
-		return 0.0, NewTProtocolException(e)
-	}
-	return math.Float64frombits(p.bytesToUint64(longBits)), nil
-}
-
-// Reads a []byte (via readBinary), and then UTF-8 decodes it.
-func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) {
-	length, e := p.readVarint32()
-	if e != nil {
-		return "", NewTProtocolException(e)
-	}
-	err = checkSizeForProtocol(length, p.cfg)
-	if err != nil {
-		return
-	}
-	if length == 0 {
-		return "", nil
-	}
-	if length < int32(len(p.buffer)) {
-		// Avoid allocation on small reads
-		buf := p.buffer[:length]
-		read, e := io.ReadFull(p.trans, buf)
-		return string(buf[:read]), NewTProtocolException(e)
-	}
-
-	buf, e := safeReadBytes(length, p.trans)
-	return string(buf), NewTProtocolException(e)
-}
-
-// Read a []byte from the wire.
-func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
-	length, e := p.readVarint32()
-	if e != nil {
-		return nil, NewTProtocolException(e)
-	}
-	err = checkSizeForProtocol(length, p.cfg)
-	if err != nil {
-		return
-	}
-	if length == 0 {
-		return []byte{}, nil
-	}
-
-	buf, e := safeReadBytes(length, p.trans)
-	return buf, NewTProtocolException(e)
-}
-
-func (p *TCompactProtocol) Flush(ctx context.Context) (err error) {
-	return NewTProtocolException(p.trans.Flush(ctx))
-}
-
-func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
-	return SkipDefaultDepth(ctx, p, fieldType)
-}
-
-func (p *TCompactProtocol) Transport() TTransport {
-	return p.origTransport
-}
-
-//
-// Internal writing methods
-//
-
-// Abstract method for writing the start of lists and sets. List and sets on
-// the wire differ only by the type indicator.
-func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) {
-	if size <= 14 {
-		return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType))))
-	}
-	err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType)))
-	if err != nil {
-		return 0, err
-	}
-	m, err := p.writeVarint32(int32(size))
-	return 1 + m, err
-}
-
-// Write an i32 as a varint. Results in 1-5 bytes on the wire.
-// TODO(pomack): make a permanent buffer like writeVarint64?
-func (p *TCompactProtocol) writeVarint32(n int32) (int, error) {
-	i32buf := p.buffer[0:5]
-	idx := 0
-	for {
-		if (n & ^0x7F) == 0 {
-			i32buf[idx] = byte(n)
-			idx++
-			// p.writeByteDirect(byte(n));
-			break
-			// return;
-		} else {
-			i32buf[idx] = byte((n & 0x7F) | 0x80)
-			idx++
-			// p.writeByteDirect(byte(((n & 0x7F) | 0x80)));
-			u := uint32(n)
-			n = int32(u >> 7)
-		}
-	}
-	return p.trans.Write(i32buf[0:idx])
-}
-
-// Write an i64 as a varint. Results in 1-10 bytes on the wire.
-func (p *TCompactProtocol) writeVarint64(n int64) (int, error) {
-	varint64out := p.buffer[0:10]
-	idx := 0
-	for {
-		if (n & ^0x7F) == 0 {
-			varint64out[idx] = byte(n)
-			idx++
-			break
-		} else {
-			varint64out[idx] = byte((n & 0x7F) | 0x80)
-			idx++
-			u := uint64(n)
-			n = int64(u >> 7)
-		}
-	}
-	return p.trans.Write(varint64out[0:idx])
-}
-
-// Convert l into a zigzag long. This allows negative numbers to be
-// represented compactly as a varint.
-func (p *TCompactProtocol) int64ToZigzag(l int64) int64 {
-	return (l << 1) ^ (l >> 63)
-}
-
-// Convert l into a zigzag long. This allows negative numbers to be
-// represented compactly as a varint.
-func (p *TCompactProtocol) int32ToZigzag(n int32) int32 {
-	return (n << 1) ^ (n >> 31)
-}
-
-func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) {
-	binary.LittleEndian.PutUint64(buf, n)
-}
-
-func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) {
-	binary.LittleEndian.PutUint64(buf, uint64(n))
-}
-
-// Writes a byte without any possibility of all that field header nonsense.
-// Used internally by other writing methods that know they need to write a byte.
-func (p *TCompactProtocol) writeByteDirect(b byte) error {
-	return p.trans.WriteByte(b)
-}
-
-// Writes a byte without any possibility of all that field header nonsense.
-func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) {
-	return 1, p.writeByteDirect(byte(n))
-}
-
-//
-// Internal reading methods
-//
-
-// Read an i32 from the wire as a varint. The MSB of each byte is set
-// if there is another byte to follow. This can read up to 5 bytes.
-func (p *TCompactProtocol) readVarint32() (int32, error) {
-	// if the wire contains the right stuff, this will just truncate the i64 we
-	// read and get us the right sign.
-	v, err := p.readVarint64()
-	return int32(v), err
-}
-
-// Read an i64 from the wire as a proper varint. The MSB of each byte is set
-// if there is another byte to follow. This can read up to 10 bytes.
-func (p *TCompactProtocol) readVarint64() (int64, error) {
-	shift := uint(0)
-	result := int64(0)
-	for {
-		b, err := p.readByteDirect()
-		if err != nil {
-			return 0, err
-		}
-		result |= int64(b&0x7f) << shift
-		if (b & 0x80) != 0x80 {
-			break
-		}
-		shift += 7
-	}
-	return result, nil
-}
-
-// Read a byte, unlike ReadByte that reads Thrift-byte that is i8.
-func (p *TCompactProtocol) readByteDirect() (byte, error) {
-	return p.trans.ReadByte()
-}
-
-//
-// encoding helpers
-//
-
-// Convert from zigzag int to int.
-func (p *TCompactProtocol) zigzagToInt32(n int32) int32 {
-	u := uint32(n)
-	return int32(u>>1) ^ -(n & 1)
-}
-
-// Convert from zigzag long to long.
-func (p *TCompactProtocol) zigzagToInt64(n int64) int64 {
-	u := uint64(n)
-	return int64(u>>1) ^ -(n & 1)
-}
-
-// Note that it's important that the mask bytes are long literals,
-// otherwise they'll default to ints, and when you shift an int left 56 bits,
-// you just get a messed up int.
-func (p *TCompactProtocol) bytesToInt64(b []byte) int64 {
-	return int64(binary.LittleEndian.Uint64(b))
-}
-
-// Note that it's important that the mask bytes are long literals,
-// otherwise they'll default to ints, and when you shift an int left 56 bits,
-// you just get a messed up int.
-func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 {
-	return binary.LittleEndian.Uint64(b)
-}
-
-//
-// type testing and converting
-//
-
-func (p *TCompactProtocol) isBoolType(b byte) bool {
-	return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE
-}
-
-// Given a tCompactType constant, convert it to its corresponding
-// TType value.
-func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
-	switch byte(t) & 0x0f {
-	case STOP:
-		return STOP, nil
-	case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE:
-		return BOOL, nil
-	case COMPACT_BYTE:
-		return BYTE, nil
-	case COMPACT_I16:
-		return I16, nil
-	case COMPACT_I32:
-		return I32, nil
-	case COMPACT_I64:
-		return I64, nil
-	case COMPACT_DOUBLE:
-		return DOUBLE, nil
-	case COMPACT_BINARY:
-		return STRING, nil
-	case COMPACT_LIST:
-		return LIST, nil
-	case COMPACT_SET:
-		return SET, nil
-	case COMPACT_MAP:
-		return MAP, nil
-	case COMPACT_STRUCT:
-		return STRUCT, nil
-	}
-	return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f))
-}
-
-// Given a TType value, find the appropriate TCompactProtocol.Types constant.
-func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
-	return ttypeToCompactType[t]
-}
-
-func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) {
-	PropagateTConfiguration(p.trans, conf)
-	PropagateTConfiguration(p.origTransport, conf)
-	p.cfg = conf
-}
-
-var (
-	_ TConfigurationSetter = (*TCompactProtocolFactory)(nil)
-	_ TConfigurationSetter = (*TCompactProtocol)(nil)
-)
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go
deleted file mode 100644
index 454d9f37748..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/configuration.go
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"crypto/tls"
-	"fmt"
-	"time"
-)
-
-// Default TConfiguration values.
-const (
-	DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024
-	DEFAULT_MAX_FRAME_SIZE   = 16384000
-
-	DEFAULT_TBINARY_STRICT_READ  = false
-	DEFAULT_TBINARY_STRICT_WRITE = true
-
-	DEFAULT_CONNECT_TIMEOUT = 0
-	DEFAULT_SOCKET_TIMEOUT  = 0
-)
-
-// TConfiguration defines some configurations shared between TTransport,
-// TProtocol, TTransportFactory, TProtocolFactory, and other implementations.
-//
-// When constructing TConfiguration, you only need to specify the non-default
-// fields. All zero values have sane default values.
-//
-// Not all configurations defined are applicable to all implementations.
-// Implementations are free to ignore the configurations not applicable to them.
-//
-// All functions attached to this type are nil-safe.
-//
-// See [1] for spec.
-//
-// NOTE: When using TConfiguration, fill in all the configurations you want to
-// set across the stack, not only the ones you want to set in the immediate
-// TTransport/TProtocol.
-//
-// For example, say you want to migrate this old code into using TConfiguration:
-//
-//     sccket := thrift.NewTSocketTimeout("host:port", time.Second)
-//     transFactory := thrift.NewTFramedTransportFactoryMaxLength(
-//         thrift.NewTTransportFactory(),
-//         1024 * 1024 * 256,
-//     )
-//     protoFactory := thrift.NewTBinaryProtocolFactory(true, true)
-//
-// This is the wrong way to do it because in the end the TConfiguration used by
-// socket and transFactory will be overwritten by the one used by protoFactory
-// because of TConfiguration propagation:
-//
-//     // bad example, DO NOT USE
-//     sccket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{
-//         ConnectTimeout: time.Second,
-//         SocketTimeout:  time.Second,
-//     })
-//     transFactory := thrift.NewTFramedTransportFactoryConf(
-//         thrift.NewTTransportFactory(),
-//         &thrift.TConfiguration{
-//             MaxFrameSize: 1024 * 1024 * 256,
-//         },
-//     )
-//     protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{
-//         TBinaryStrictRead:  thrift.BoolPtr(true),
-//         TBinaryStrictWrite: thrift.BoolPtr(true),
-//     })
-//
-// This is the correct way to do it:
-//
-//     conf := &thrift.TConfiguration{
-//         ConnectTimeout: time.Second,
-//         SocketTimeout:  time.Second,
-//
-//         MaxFrameSize: 1024 * 1024 * 256,
-//
-//         TBinaryStrictRead:  thrift.BoolPtr(true),
-//         TBinaryStrictWrite: thrift.BoolPtr(true),
-//     }
-//     sccket := thrift.NewTSocketConf("host:port", conf)
-//     transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf)
-//     protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf)
-//
-// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md
-type TConfiguration struct {
-	// If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead.
-	MaxMessageSize int32
-
-	// If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead.
-	//
-	// Also if MaxMessageSize < MaxFrameSize,
-	// MaxMessageSize will be used instead.
-	MaxFrameSize int32
-
-	// Connect and socket timeouts to be used by TSocket and TSSLSocket.
-	//
-	// 0 means no timeout.
-	//
-	// If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be
-	// used.
-	ConnectTimeout time.Duration
-	SocketTimeout  time.Duration
-
-	// TLS config to be used by TSSLSocket.
-	TLSConfig *tls.Config
-
-	// Strict read/write configurations for TBinaryProtocol.
-	//
-	// BoolPtr helper function is available to use literal values.
-	TBinaryStrictRead  *bool
-	TBinaryStrictWrite *bool
-
-	// The wrapped protocol id to be used in THeader transport/protocol.
-	//
-	// THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions
-	// are provided to help filling this value.
-	THeaderProtocolID *THeaderProtocolID
-
-	// Used internally by deprecated constructors, to avoid overriding
-	// underlying TTransport/TProtocol's cfg by accidental propagations.
-	//
-	// For external users this is always false.
-	noPropagation bool
-}
-
-// GetMaxMessageSize returns the max message size an implementation should
-// follow.
-//
-// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil.
-func (tc *TConfiguration) GetMaxMessageSize() int32 {
-	if tc == nil || tc.MaxMessageSize <= 0 {
-		return DEFAULT_MAX_MESSAGE_SIZE
-	}
-	return tc.MaxMessageSize
-}
-
-// GetMaxFrameSize returns the max frame size an implementation should follow.
-//
-// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil.
-//
-// If the configured max message size is smaller than the configured max frame
-// size, the smaller one will be returned instead.
-func (tc *TConfiguration) GetMaxFrameSize() int32 {
-	if tc == nil {
-		return DEFAULT_MAX_FRAME_SIZE
-	}
-	maxFrameSize := tc.MaxFrameSize
-	if maxFrameSize <= 0 {
-		maxFrameSize = DEFAULT_MAX_FRAME_SIZE
-	}
-	if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize {
-		return maxMessageSize
-	}
-	return maxFrameSize
-}
-
-// GetConnectTimeout returns the connect timeout should be used by TSocket and
-// TSSLSocket.
-//
-// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead.
-func (tc *TConfiguration) GetConnectTimeout() time.Duration {
-	if tc == nil || tc.ConnectTimeout < 0 {
-		return DEFAULT_CONNECT_TIMEOUT
-	}
-	return tc.ConnectTimeout
-}
-
-// GetSocketTimeout returns the socket timeout should be used by TSocket and
-// TSSLSocket.
-//
-// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead.
-func (tc *TConfiguration) GetSocketTimeout() time.Duration {
-	if tc == nil || tc.SocketTimeout < 0 {
-		return DEFAULT_SOCKET_TIMEOUT
-	}
-	return tc.SocketTimeout
-}
-
-// GetTLSConfig returns the tls config should be used by TSSLSocket.
-//
-// It's nil-safe. If tc is nil, nil will be returned instead.
-func (tc *TConfiguration) GetTLSConfig() *tls.Config {
-	if tc == nil {
-		return nil
-	}
-	return tc.TLSConfig
-}
-
-// GetTBinaryStrictRead returns the strict read configuration TBinaryProtocol
-// should follow.
-//
-// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or
-// tc.TBinaryStrictRead is nil.
-func (tc *TConfiguration) GetTBinaryStrictRead() bool {
-	if tc == nil || tc.TBinaryStrictRead == nil {
-		return DEFAULT_TBINARY_STRICT_READ
-	}
-	return *tc.TBinaryStrictRead
-}
-
-// GetTBinaryStrictWrite returns the strict read configuration TBinaryProtocol
-// should follow.
-//
-// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or
-// tc.TBinaryStrictWrite is nil.
-func (tc *TConfiguration) GetTBinaryStrictWrite() bool {
-	if tc == nil || tc.TBinaryStrictWrite == nil {
-		return DEFAULT_TBINARY_STRICT_WRITE
-	}
-	return *tc.TBinaryStrictWrite
-}
-
-// GetTHeaderProtocolID returns the THeaderProtocolID should be used by
-// THeaderProtocol clients (for servers, they always use the same one as the
-// client instead).
-//
-// It's nil-safe. If either tc or tc.THeaderProtocolID is nil,
-// THeaderProtocolDefault will be returned instead.
-// THeaderProtocolDefault will also be returned if configured value is invalid.
-func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID {
-	if tc == nil || tc.THeaderProtocolID == nil {
-		return THeaderProtocolDefault
-	}
-	protoID := *tc.THeaderProtocolID
-	if err := protoID.Validate(); err != nil {
-		return THeaderProtocolDefault
-	}
-	return protoID
-}
-
-// THeaderProtocolIDPtr validates and returns the pointer to id.
-//
-// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault
-// and the validation error will be returned.
-func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) {
-	err := id.Validate()
-	if err != nil {
-		id = THeaderProtocolDefault
-	}
-	return &id, err
-}
-
-// THeaderProtocolIDPtrMust validates and returns the pointer to id.
-//
-// It's similar to THeaderProtocolIDPtr, but it panics on validation errors
-// instead of returning them.
-func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID {
-	ptr, err := THeaderProtocolIDPtr(id)
-	if err != nil {
-		panic(err)
-	}
-	return ptr
-}
-
-// TConfigurationSetter is an optional interface TProtocol, TTransport,
-// TProtocolFactory, TTransportFactory, and other implementations can implement.
-//
-// It's intended to be called during intializations.
-// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the
-// middle of a message is undefined:
-// It may or may not change the behavior of the current processing message,
-// and it may even cause the current message to fail.
-//
-// Note for implementations: SetTConfiguration might be called multiple times
-// with the same value in quick successions due to the implementation of the
-// propagation. Implementations should make SetTConfiguration as simple as
-// possible (usually just overwrite the stored configuration and propagate it to
-// the wrapped TTransports/TProtocols).
-type TConfigurationSetter interface {
-	SetTConfiguration(*TConfiguration)
-}
-
-// PropagateTConfiguration propagates cfg to impl if impl implements
-// TConfigurationSetter and cfg is non-nil, otherwise it does nothing.
-//
-// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration
-// with everything being default value, use &TConfiguration{} explicitly instead.
-func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) {
-	if cfg == nil || cfg.noPropagation {
-		return
-	}
-
-	if setter, ok := impl.(TConfigurationSetter); ok {
-		setter.SetTConfiguration(cfg)
-	}
-}
-
-func checkSizeForProtocol(size int32, cfg *TConfiguration) error {
-	if size < 0 {
-		return NewTProtocolExceptionWithType(
-			NEGATIVE_SIZE,
-			fmt.Errorf("negative size: %d", size),
-		)
-	}
-	if size > cfg.GetMaxMessageSize() {
-		return NewTProtocolExceptionWithType(
-			SIZE_LIMIT,
-			fmt.Errorf("size exceeded max allowed: %d", size),
-		)
-	}
-	return nil
-}
-
-type tTransportFactoryConf struct {
-	delegate TTransportFactory
-	cfg      *TConfiguration
-}
-
-func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) {
-	trans, err := f.delegate.GetTransport(orig)
-	if err == nil {
-		PropagateTConfiguration(orig, f.cfg)
-		PropagateTConfiguration(trans, f.cfg)
-	}
-	return trans, err
-}
-
-func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) {
-	PropagateTConfiguration(f.delegate, f.cfg)
-	f.cfg = cfg
-}
-
-// TTransportFactoryConf wraps a TTransportFactory to propagate
-// TConfiguration on the factory's GetTransport calls.
-func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory {
-	return &tTransportFactoryConf{
-		delegate: delegate,
-		cfg:      conf,
-	}
-}
-
-type tProtocolFactoryConf struct {
-	delegate TProtocolFactory
-	cfg      *TConfiguration
-}
-
-func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol {
-	proto := f.delegate.GetProtocol(trans)
-	PropagateTConfiguration(trans, f.cfg)
-	PropagateTConfiguration(proto, f.cfg)
-	return proto
-}
-
-func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) {
-	PropagateTConfiguration(f.delegate, f.cfg)
-	f.cfg = cfg
-}
-
-// TProtocolFactoryConf wraps a TProtocolFactory to propagate
-// TConfiguration on the factory's GetProtocol calls.
-func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory {
-	return &tProtocolFactoryConf{
-		delegate: delegate,
-		cfg:      conf,
-	}
-}
-
-var (
-	_ TConfigurationSetter = (*tTransportFactoryConf)(nil)
-	_ TConfigurationSetter = (*tProtocolFactoryConf)(nil)
-)
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go
deleted file mode 100644
index d15c1bcf894..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/context.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import "context"
-
-var defaultCtx = context.Background()
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go
deleted file mode 100644
index fdf9bfec15e..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/debug_protocol.go
+++ /dev/null
@@ -1,447 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"context"
-	"fmt"
-)
-
-type TDebugProtocol struct {
-	// Required. The actual TProtocol to do the read/write.
-	Delegate TProtocol
-
-	// Optional. The logger and prefix to log all the args/return values
-	// from Delegate TProtocol calls.
-	//
-	// If Logger is nil, StdLogger using stdlib log package with os.Stderr
-	// will be used. If disable logging is desired, set Logger to NopLogger
-	// explicitly instead of leaving it as nil/unset.
-	Logger    Logger
-	LogPrefix string
-
-	// Optional. An TProtocol to duplicate everything read/written from Delegate.
-	//
-	// A typical use case of this is to use TSimpleJSONProtocol wrapping
-	// TMemoryBuffer in a middleware to json logging requests/responses.
-	//
-	// This feature is not available from TDebugProtocolFactory. In order to
-	// use it you have to construct TDebugProtocol directly, or set DuplicateTo
-	// field after getting a TDebugProtocol from the factory.
-	DuplicateTo TProtocol
-}
-
-type TDebugProtocolFactory struct {
-	Underlying TProtocolFactory
-	LogPrefix  string
-	Logger     Logger
-}
-
-// NewTDebugProtocolFactory creates a TDebugProtocolFactory.
-//
-// Deprecated: Please use NewTDebugProtocolFactoryWithLogger or the struct
-// itself instead. This version will use the default logger from standard
-// library.
-func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory {
-	return &TDebugProtocolFactory{
-		Underlying: underlying,
-		LogPrefix:  logPrefix,
-		Logger:     StdLogger(nil),
-	}
-}
-
-// NewTDebugProtocolFactoryWithLogger creates a TDebugProtocolFactory.
-func NewTDebugProtocolFactoryWithLogger(underlying TProtocolFactory, logPrefix string, logger Logger) *TDebugProtocolFactory {
-	return &TDebugProtocolFactory{
-		Underlying: underlying,
-		LogPrefix:  logPrefix,
-		Logger:     logger,
-	}
-}
-
-func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol {
-	return &TDebugProtocol{
-		Delegate:  t.Underlying.GetProtocol(trans),
-		LogPrefix: t.LogPrefix,
-		Logger:    fallbackLogger(t.Logger),
-	}
-}
-
-func (tdp *TDebugProtocol) logf(format string, v ...interface{}) {
-	fallbackLogger(tdp.Logger)(fmt.Sprintf(format, v...))
-}
-
-func (tdp *TDebugProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
-	err := tdp.Delegate.WriteMessageBegin(ctx, name, typeId, seqid)
-	tdp.logf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteMessageEnd(ctx context.Context) error {
-	err := tdp.Delegate.WriteMessageEnd(ctx)
-	tdp.logf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteMessageEnd(ctx)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteStructBegin(ctx context.Context, name string) error {
-	err := tdp.Delegate.WriteStructBegin(ctx, name)
-	tdp.logf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteStructBegin(ctx, name)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteStructEnd(ctx context.Context) error {
-	err := tdp.Delegate.WriteStructEnd(ctx)
-	tdp.logf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteStructEnd(ctx)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
-	err := tdp.Delegate.WriteFieldBegin(ctx, name, typeId, id)
-	tdp.logf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteFieldEnd(ctx context.Context) error {
-	err := tdp.Delegate.WriteFieldEnd(ctx)
-	tdp.logf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteFieldEnd(ctx)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteFieldStop(ctx context.Context) error {
-	err := tdp.Delegate.WriteFieldStop(ctx)
-	tdp.logf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteFieldStop(ctx)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
-	err := tdp.Delegate.WriteMapBegin(ctx, keyType, valueType, size)
-	tdp.logf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteMapEnd(ctx context.Context) error {
-	err := tdp.Delegate.WriteMapEnd(ctx)
-	tdp.logf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteMapEnd(ctx)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
-	err := tdp.Delegate.WriteListBegin(ctx, elemType, size)
-	tdp.logf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteListBegin(ctx, elemType, size)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteListEnd(ctx context.Context) error {
-	err := tdp.Delegate.WriteListEnd(ctx)
-	tdp.logf("%sWriteListEnd() => %#v", tdp.LogPrefix, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteListEnd(ctx)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
-	err := tdp.Delegate.WriteSetBegin(ctx, elemType, size)
-	tdp.logf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteSetEnd(ctx context.Context) error {
-	err := tdp.Delegate.WriteSetEnd(ctx)
-	tdp.logf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteSetEnd(ctx)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteBool(ctx context.Context, value bool) error {
-	err := tdp.Delegate.WriteBool(ctx, value)
-	tdp.logf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteBool(ctx, value)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteByte(ctx context.Context, value int8) error {
-	err := tdp.Delegate.WriteByte(ctx, value)
-	tdp.logf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteByte(ctx, value)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteI16(ctx context.Context, value int16) error {
-	err := tdp.Delegate.WriteI16(ctx, value)
-	tdp.logf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteI16(ctx, value)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteI32(ctx context.Context, value int32) error {
-	err := tdp.Delegate.WriteI32(ctx, value)
-	tdp.logf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteI32(ctx, value)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteI64(ctx context.Context, value int64) error {
-	err := tdp.Delegate.WriteI64(ctx, value)
-	tdp.logf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteI64(ctx, value)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteDouble(ctx context.Context, value float64) error {
-	err := tdp.Delegate.WriteDouble(ctx, value)
-	tdp.logf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteDouble(ctx, value)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteString(ctx context.Context, value string) error {
-	err := tdp.Delegate.WriteString(ctx, value)
-	tdp.logf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteString(ctx, value)
-	}
-	return err
-}
-func (tdp *TDebugProtocol) WriteBinary(ctx context.Context, value []byte) error {
-	err := tdp.Delegate.WriteBinary(ctx, value)
-	tdp.logf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteBinary(ctx, value)
-	}
-	return err
-}
-
-func (tdp *TDebugProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) {
-	name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin(ctx)
-	tdp.logf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteMessageBegin(ctx, name, typeId, seqid)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadMessageEnd(ctx context.Context) (err error) {
-	err = tdp.Delegate.ReadMessageEnd(ctx)
-	tdp.logf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteMessageEnd(ctx)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
-	name, err = tdp.Delegate.ReadStructBegin(ctx)
-	tdp.logf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteStructBegin(ctx, name)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadStructEnd(ctx context.Context) (err error) {
-	err = tdp.Delegate.ReadStructEnd(ctx)
-	tdp.logf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteStructEnd(ctx)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) {
-	name, typeId, id, err = tdp.Delegate.ReadFieldBegin(ctx)
-	tdp.logf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteFieldBegin(ctx, name, typeId, id)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadFieldEnd(ctx context.Context) (err error) {
-	err = tdp.Delegate.ReadFieldEnd(ctx)
-	tdp.logf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteFieldEnd(ctx)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
-	keyType, valueType, size, err = tdp.Delegate.ReadMapBegin(ctx)
-	tdp.logf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteMapBegin(ctx, keyType, valueType, size)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadMapEnd(ctx context.Context) (err error) {
-	err = tdp.Delegate.ReadMapEnd(ctx)
-	tdp.logf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteMapEnd(ctx)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
-	elemType, size, err = tdp.Delegate.ReadListBegin(ctx)
-	tdp.logf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteListBegin(ctx, elemType, size)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadListEnd(ctx context.Context) (err error) {
-	err = tdp.Delegate.ReadListEnd(ctx)
-	tdp.logf("%sReadListEnd() err=%#v", tdp.LogPrefix, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteListEnd(ctx)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
-	elemType, size, err = tdp.Delegate.ReadSetBegin(ctx)
-	tdp.logf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteSetBegin(ctx, elemType, size)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadSetEnd(ctx context.Context) (err error) {
-	err = tdp.Delegate.ReadSetEnd(ctx)
-	tdp.logf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteSetEnd(ctx)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadBool(ctx context.Context) (value bool, err error) {
-	value, err = tdp.Delegate.ReadBool(ctx)
-	tdp.logf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteBool(ctx, value)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadByte(ctx context.Context) (value int8, err error) {
-	value, err = tdp.Delegate.ReadByte(ctx)
-	tdp.logf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteByte(ctx, value)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadI16(ctx context.Context) (value int16, err error) {
-	value, err = tdp.Delegate.ReadI16(ctx)
-	tdp.logf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteI16(ctx, value)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadI32(ctx context.Context) (value int32, err error) {
-	value, err = tdp.Delegate.ReadI32(ctx)
-	tdp.logf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteI32(ctx, value)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadI64(ctx context.Context) (value int64, err error) {
-	value, err = tdp.Delegate.ReadI64(ctx)
-	tdp.logf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteI64(ctx, value)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
-	value, err = tdp.Delegate.ReadDouble(ctx)
-	tdp.logf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteDouble(ctx, value)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadString(ctx context.Context) (value string, err error) {
-	value, err = tdp.Delegate.ReadString(ctx)
-	tdp.logf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteString(ctx, value)
-	}
-	return
-}
-func (tdp *TDebugProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
-	value, err = tdp.Delegate.ReadBinary(ctx)
-	tdp.logf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.WriteBinary(ctx, value)
-	}
-	return
-}
-func (tdp *TDebugProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
-	err = tdp.Delegate.Skip(ctx, fieldType)
-	tdp.logf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.Skip(ctx, fieldType)
-	}
-	return
-}
-func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) {
-	err = tdp.Delegate.Flush(ctx)
-	tdp.logf("%sFlush() (err=%#v)", tdp.LogPrefix, err)
-	if tdp.DuplicateTo != nil {
-		tdp.DuplicateTo.Flush(ctx)
-	}
-	return
-}
-
-func (tdp *TDebugProtocol) Transport() TTransport {
-	return tdp.Delegate.Transport()
-}
-
-// SetTConfiguration implements TConfigurationSetter for propagation.
-func (tdp *TDebugProtocol) SetTConfiguration(conf *TConfiguration) {
-	PropagateTConfiguration(tdp.Delegate, conf)
-	PropagateTConfiguration(tdp.DuplicateTo, conf)
-}
-
-var _ TConfigurationSetter = (*TDebugProtocol)(nil)
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go
deleted file mode 100644
index cefc7ecda5d..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/deserializer.go
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"context"
-	"sync"
-)
-
-type TDeserializer struct {
-	Transport *TMemoryBuffer
-	Protocol  TProtocol
-}
-
-func NewTDeserializer() *TDeserializer {
-	transport := NewTMemoryBufferLen(1024)
-	protocol := NewTBinaryProtocolTransport(transport)
-
-	return &TDeserializer{
-		Transport: transport,
-		Protocol:  protocol,
-	}
-}
-
-func (t *TDeserializer) ReadString(ctx context.Context, msg TStruct, s string) (err error) {
-	t.Transport.Reset()
-
-	err = nil
-	if _, err = t.Transport.Write([]byte(s)); err != nil {
-		return
-	}
-	if err = msg.Read(ctx, t.Protocol); err != nil {
-		return
-	}
-	return
-}
-
-func (t *TDeserializer) Read(ctx context.Context, msg TStruct, b []byte) (err error) {
-	t.Transport.Reset()
-
-	err = nil
-	if _, err = t.Transport.Write(b); err != nil {
-		return
-	}
-	if err = msg.Read(ctx, t.Protocol); err != nil {
-		return
-	}
-	return
-}
-
-// TDeserializerPool is the thread-safe version of TDeserializer,
-// it uses resource pool of TDeserializer under the hood.
-//
-// It must be initialized with either NewTDeserializerPool or
-// NewTDeserializerPoolSizeFactory.
-type TDeserializerPool struct {
-	pool sync.Pool
-}
-
-// NewTDeserializerPool creates a new TDeserializerPool.
-//
-// NewTDeserializer can be used as the arg here.
-func NewTDeserializerPool(f func() *TDeserializer) *TDeserializerPool {
-	return &TDeserializerPool{
-		pool: sync.Pool{
-			New: func() interface{} {
-				return f()
-			},
-		},
-	}
-}
-
-// NewTDeserializerPoolSizeFactory creates a new TDeserializerPool with
-// the given size and protocol factory.
-//
-// Note that the size is not the limit. The TMemoryBuffer underneath can grow
-// larger than that. It just dictates the initial size.
-func NewTDeserializerPoolSizeFactory(size int, factory TProtocolFactory) *TDeserializerPool {
-	return &TDeserializerPool{
-		pool: sync.Pool{
-			New: func() interface{} {
-				transport := NewTMemoryBufferLen(size)
-				protocol := factory.GetProtocol(transport)
-
-				return &TDeserializer{
-					Transport: transport,
-					Protocol:  protocol,
-				}
-			},
-		},
-	}
-}
-
-func (t *TDeserializerPool) ReadString(ctx context.Context, msg TStruct, s string) error {
-	d := t.pool.Get().(*TDeserializer)
-	defer t.pool.Put(d)
-	return d.ReadString(ctx, msg, s)
-}
-
-func (t *TDeserializerPool) Read(ctx context.Context, msg TStruct, b []byte) error {
-	d := t.pool.Get().(*TDeserializer)
-	defer t.pool.Put(d)
-	return d.Read(ctx, msg, b)
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go
deleted file mode 100644
index 630b938f004..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/exception.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"errors"
-)
-
-// Generic Thrift exception
-type TException interface {
-	error
-
-	TExceptionType() TExceptionType
-}
-
-// Prepends additional information to an error without losing the Thrift exception interface
-func PrependError(prepend string, err error) error {
-	msg := prepend + err.Error()
-
-	var te TException
-	if errors.As(err, &te) {
-		switch te.TExceptionType() {
-		case TExceptionTypeTransport:
-			if t, ok := err.(TTransportException); ok {
-				return prependTTransportException(prepend, t)
-			}
-		case TExceptionTypeProtocol:
-			if t, ok := err.(TProtocolException); ok {
-				return prependTProtocolException(prepend, t)
-			}
-		case TExceptionTypeApplication:
-			var t TApplicationException
-			if errors.As(err, &t) {
-				return NewTApplicationException(t.TypeId(), msg)
-			}
-		}
-
-		return wrappedTException{
-			err:            err,
-			msg:            msg,
-			tExceptionType: te.TExceptionType(),
-		}
-	}
-
-	return errors.New(msg)
-}
-
-// TExceptionType is an enum type to categorize different "subclasses" of TExceptions.
-type TExceptionType byte
-
-// TExceptionType values
-const (
-	TExceptionTypeUnknown     TExceptionType = iota
-	TExceptionTypeCompiled                   // TExceptions defined in thrift files and generated by thrift compiler
-	TExceptionTypeApplication                // TApplicationExceptions
-	TExceptionTypeProtocol                   // TProtocolExceptions
-	TExceptionTypeTransport                  // TTransportExceptions
-)
-
-// WrapTException wraps an error into TException.
-//
-// If err is nil or already TException, it's returned as-is.
-// Otherwise it will be wrapped into TException with TExceptionType() returning
-// TExceptionTypeUnknown, and Unwrap() returning the original error.
-func WrapTException(err error) TException {
-	if err == nil {
-		return nil
-	}
-
-	if te, ok := err.(TException); ok {
-		return te
-	}
-
-	return wrappedTException{
-		err:            err,
-		msg:            err.Error(),
-		tExceptionType: TExceptionTypeUnknown,
-	}
-}
-
-type wrappedTException struct {
-	err            error
-	msg            string
-	tExceptionType TExceptionType
-}
-
-func (w wrappedTException) Error() string {
-	return w.msg
-}
-
-func (w wrappedTException) TExceptionType() TExceptionType {
-	return w.tExceptionType
-}
-
-func (w wrappedTException) Unwrap() error {
-	return w.err
-}
-
-var _ TException = wrappedTException{}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go
deleted file mode 100644
index f683e7f544b..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/framed_transport.go
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"bufio"
-	"bytes"
-	"context"
-	"encoding/binary"
-	"fmt"
-	"io"
-)
-
-// Deprecated: Use DEFAULT_MAX_FRAME_SIZE instead.
-const DEFAULT_MAX_LENGTH = 16384000
-
-type TFramedTransport struct {
-	transport TTransport
-
-	cfg *TConfiguration
-
-	writeBuf bytes.Buffer
-
-	reader  *bufio.Reader
-	readBuf bytes.Buffer
-
-	buffer [4]byte
-}
-
-type tFramedTransportFactory struct {
-	factory TTransportFactory
-	cfg     *TConfiguration
-}
-
-// Deprecated: Use NewTFramedTransportFactoryConf instead.
-func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory {
-	return NewTFramedTransportFactoryConf(factory, &TConfiguration{
-		MaxFrameSize: DEFAULT_MAX_LENGTH,
-
-		noPropagation: true,
-	})
-}
-
-// Deprecated: Use NewTFramedTransportFactoryConf instead.
-func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory {
-	return NewTFramedTransportFactoryConf(factory, &TConfiguration{
-		MaxFrameSize: int32(maxLength),
-
-		noPropagation: true,
-	})
-}
-
-func NewTFramedTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory {
-	PropagateTConfiguration(factory, conf)
-	return &tFramedTransportFactory{
-		factory: factory,
-		cfg:     conf,
-	}
-}
-
-func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) {
-	PropagateTConfiguration(base, p.cfg)
-	tt, err := p.factory.GetTransport(base)
-	if err != nil {
-		return nil, err
-	}
-	return NewTFramedTransportConf(tt, p.cfg), nil
-}
-
-func (p *tFramedTransportFactory) SetTConfiguration(cfg *TConfiguration) {
-	PropagateTConfiguration(p.factory, cfg)
-	p.cfg = cfg
-}
-
-// Deprecated: Use NewTFramedTransportConf instead.
-func NewTFramedTransport(transport TTransport) *TFramedTransport {
-	return NewTFramedTransportConf(transport, &TConfiguration{
-		MaxFrameSize: DEFAULT_MAX_LENGTH,
-
-		noPropagation: true,
-	})
-}
-
-// Deprecated: Use NewTFramedTransportConf instead.
-func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport {
-	return NewTFramedTransportConf(transport, &TConfiguration{
-		MaxFrameSize: int32(maxLength),
-
-		noPropagation: true,
-	})
-}
-
-func NewTFramedTransportConf(transport TTransport, conf *TConfiguration) *TFramedTransport {
-	PropagateTConfiguration(transport, conf)
-	return &TFramedTransport{
-		transport: transport,
-		reader:    bufio.NewReader(transport),
-		cfg:       conf,
-	}
-}
-
-func (p *TFramedTransport) Open() error {
-	return p.transport.Open()
-}
-
-func (p *TFramedTransport) IsOpen() bool {
-	return p.transport.IsOpen()
-}
-
-func (p *TFramedTransport) Close() error {
-	return p.transport.Close()
-}
-
-func (p *TFramedTransport) Read(buf []byte) (read int, err error) {
-	read, err = p.readBuf.Read(buf)
-	if err != io.EOF {
-		return
-	}
-
-	// For bytes.Buffer.Read, EOF would only happen when read is zero,
-	// but still, do a sanity check,
-	// in case that behavior is changed in a future version of go stdlib.
-	// When that happens, just return nil error,
-	// and let the caller call Read again to read the next frame.
-	if read > 0 {
-		return read, nil
-	}
-
-	// Reaching here means that the last Read finished the last frame,
-	// so we need to read the next frame into readBuf now.
-	if err = p.readFrame(); err != nil {
-		return read, err
-	}
-	newRead, err := p.Read(buf[read:])
-	return read + newRead, err
-}
-
-func (p *TFramedTransport) ReadByte() (c byte, err error) {
-	buf := p.buffer[:1]
-	_, err = p.Read(buf)
-	if err != nil {
-		return
-	}
-	c = buf[0]
-	return
-}
-
-func (p *TFramedTransport) Write(buf []byte) (int, error) {
-	n, err := p.writeBuf.Write(buf)
-	return n, NewTTransportExceptionFromError(err)
-}
-
-func (p *TFramedTransport) WriteByte(c byte) error {
-	return p.writeBuf.WriteByte(c)
-}
-
-func (p *TFramedTransport) WriteString(s string) (n int, err error) {
-	return p.writeBuf.WriteString(s)
-}
-
-func (p *TFramedTransport) Flush(ctx context.Context) error {
-	size := p.writeBuf.Len()
-	buf := p.buffer[:4]
-	binary.BigEndian.PutUint32(buf, uint32(size))
-	_, err := p.transport.Write(buf)
-	if err != nil {
-		p.writeBuf.Reset()
-		return NewTTransportExceptionFromError(err)
-	}
-	if size > 0 {
-		if _, err := io.Copy(p.transport, &p.writeBuf); err != nil {
-			p.writeBuf.Reset()
-			return NewTTransportExceptionFromError(err)
-		}
-	}
-	err = p.transport.Flush(ctx)
-	return NewTTransportExceptionFromError(err)
-}
-
-func (p *TFramedTransport) readFrame() error {
-	buf := p.buffer[:4]
-	if _, err := io.ReadFull(p.reader, buf); err != nil {
-		return err
-	}
-	size := binary.BigEndian.Uint32(buf)
-	if size < 0 || size > uint32(p.cfg.GetMaxFrameSize()) {
-		return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size))
-	}
-	_, err := io.CopyN(&p.readBuf, p.reader, int64(size))
-	return NewTTransportExceptionFromError(err)
-}
-
-func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) {
-	return uint64(p.readBuf.Len())
-}
-
-// SetTConfiguration implements TConfigurationSetter.
-func (p *TFramedTransport) SetTConfiguration(cfg *TConfiguration) {
-	PropagateTConfiguration(p.transport, cfg)
-	p.cfg = cfg
-}
-
-var (
-	_ TConfigurationSetter = (*tFramedTransportFactory)(nil)
-	_ TConfigurationSetter = (*TFramedTransport)(nil)
-)
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go
deleted file mode 100644
index ac9bd4882be..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_context.go
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"context"
-)
-
-// See https://godoc.org/context#WithValue on why do we need the unexported typedefs.
-type (
-	headerKey     string
-	headerKeyList int
-)
-
-// Values for headerKeyList.
-const (
-	headerKeyListRead headerKeyList = iota
-	headerKeyListWrite
-)
-
-// SetHeader sets a header in the context.
-func SetHeader(ctx context.Context, key, value string) context.Context {
-	return context.WithValue(
-		ctx,
-		headerKey(key),
-		value,
-	)
-}
-
-// UnsetHeader unsets a previously set header in the context.
-func UnsetHeader(ctx context.Context, key string) context.Context {
-	return context.WithValue(
-		ctx,
-		headerKey(key),
-		nil,
-	)
-}
-
-// GetHeader returns a value of the given header from the context.
-func GetHeader(ctx context.Context, key string) (value string, ok bool) {
-	if v := ctx.Value(headerKey(key)); v != nil {
-		value, ok = v.(string)
-	}
-	return
-}
-
-// SetReadHeaderList sets the key list of read THeaders in the context.
-func SetReadHeaderList(ctx context.Context, keys []string) context.Context {
-	return context.WithValue(
-		ctx,
-		headerKeyListRead,
-		keys,
-	)
-}
-
-// GetReadHeaderList returns the key list of read THeaders from the context.
-func GetReadHeaderList(ctx context.Context) []string {
-	if v := ctx.Value(headerKeyListRead); v != nil {
-		if value, ok := v.([]string); ok {
-			return value
-		}
-	}
-	return nil
-}
-
-// SetWriteHeaderList sets the key list of THeaders to write in the context.
-func SetWriteHeaderList(ctx context.Context, keys []string) context.Context {
-	return context.WithValue(
-		ctx,
-		headerKeyListWrite,
-		keys,
-	)
-}
-
-// GetWriteHeaderList returns the key list of THeaders to write from the context.
-func GetWriteHeaderList(ctx context.Context) []string {
-	if v := ctx.Value(headerKeyListWrite); v != nil {
-		if value, ok := v.([]string); ok {
-			return value
-		}
-	}
-	return nil
-}
-
-// AddReadTHeaderToContext adds the whole THeader headers into context.
-func AddReadTHeaderToContext(ctx context.Context, headers THeaderMap) context.Context {
-	keys := make([]string, 0, len(headers))
-	for key, value := range headers {
-		ctx = SetHeader(ctx, key, value)
-		keys = append(keys, key)
-	}
-	return SetReadHeaderList(ctx, keys)
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go
deleted file mode 100644
index 878041f8df1..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_protocol.go
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"context"
-	"errors"
-)
-
-// THeaderProtocol is a thrift protocol that implements THeader:
-// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
-//
-// It supports either binary or compact protocol as the wrapped protocol.
-//
-// Most of the THeader handlings are happening inside THeaderTransport.
-type THeaderProtocol struct {
-	transport *THeaderTransport
-
-	// Will be initialized on first read/write.
-	protocol TProtocol
-
-	cfg *TConfiguration
-}
-
-// Deprecated: Use NewTHeaderProtocolConf instead.
-func NewTHeaderProtocol(trans TTransport) *THeaderProtocol {
-	return newTHeaderProtocolConf(trans, &TConfiguration{
-		noPropagation: true,
-	})
-}
-
-// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying
-// transport with given TConfiguration.
-//
-// The passed in transport will be wrapped with THeaderTransport.
-//
-// Note that THeaderTransport handles frame and zlib by itself,
-// so the underlying transport should be a raw socket transports (TSocket or TSSLSocket),
-// instead of rich transports like TZlibTransport or TFramedTransport.
-func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol {
-	return newTHeaderProtocolConf(trans, conf)
-}
-
-func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol {
-	t := NewTHeaderTransportConf(trans, cfg)
-	p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t)
-	PropagateTConfiguration(p, cfg)
-	return &THeaderProtocol{
-		transport: t,
-		protocol:  p,
-		cfg:       cfg,
-	}
-}
-
-type tHeaderProtocolFactory struct {
-	cfg *TConfiguration
-}
-
-func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol {
-	return newTHeaderProtocolConf(trans, f.cfg)
-}
-
-func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) {
-	f.cfg = cfg
-}
-
-// Deprecated: Use NewTHeaderProtocolFactoryConf instead.
-func NewTHeaderProtocolFactory() TProtocolFactory {
-	return NewTHeaderProtocolFactoryConf(&TConfiguration{
-		noPropagation: true,
-	})
-}
-
-// NewTHeaderProtocolFactoryConf creates a factory for THeader with given
-// TConfiguration.
-func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory {
-	return tHeaderProtocolFactory{
-		cfg: conf,
-	}
-}
-
-// Transport returns the underlying transport.
-//
-// It's guaranteed to be of type *THeaderTransport.
-func (p *THeaderProtocol) Transport() TTransport {
-	return p.transport
-}
-
-// GetReadHeaders returns the THeaderMap read from transport.
-func (p *THeaderProtocol) GetReadHeaders() THeaderMap {
-	return p.transport.GetReadHeaders()
-}
-
-// SetWriteHeader sets a header for write.
-func (p *THeaderProtocol) SetWriteHeader(key, value string) {
-	p.transport.SetWriteHeader(key, value)
-}
-
-// ClearWriteHeaders clears all write headers previously set.
-func (p *THeaderProtocol) ClearWriteHeaders() {
-	p.transport.ClearWriteHeaders()
-}
-
-// AddTransform add a transform for writing.
-func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error {
-	return p.transport.AddTransform(transform)
-}
-
-func (p *THeaderProtocol) Flush(ctx context.Context) error {
-	return p.transport.Flush(ctx)
-}
-
-func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error {
-	newProto, err := p.transport.Protocol().GetProtocol(p.transport)
-	if err != nil {
-		return err
-	}
-	PropagateTConfiguration(newProto, p.cfg)
-	p.protocol = newProto
-	p.transport.SequenceID = seqID
-	return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID)
-}
-
-func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error {
-	if err := p.protocol.WriteMessageEnd(ctx); err != nil {
-		return err
-	}
-	return p.transport.Flush(ctx)
-}
-
-func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error {
-	return p.protocol.WriteStructBegin(ctx, name)
-}
-
-func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error {
-	return p.protocol.WriteStructEnd(ctx)
-}
-
-func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error {
-	return p.protocol.WriteFieldBegin(ctx, name, typeID, id)
-}
-
-func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error {
-	return p.protocol.WriteFieldEnd(ctx)
-}
-
-func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error {
-	return p.protocol.WriteFieldStop(ctx)
-}
-
-func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
-	return p.protocol.WriteMapBegin(ctx, keyType, valueType, size)
-}
-
-func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error {
-	return p.protocol.WriteMapEnd(ctx)
-}
-
-func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
-	return p.protocol.WriteListBegin(ctx, elemType, size)
-}
-
-func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error {
-	return p.protocol.WriteListEnd(ctx)
-}
-
-func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
-	return p.protocol.WriteSetBegin(ctx, elemType, size)
-}
-
-func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error {
-	return p.protocol.WriteSetEnd(ctx)
-}
-
-func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error {
-	return p.protocol.WriteBool(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error {
-	return p.protocol.WriteByte(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error {
-	return p.protocol.WriteI16(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error {
-	return p.protocol.WriteI32(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error {
-	return p.protocol.WriteI64(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error {
-	return p.protocol.WriteDouble(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error {
-	return p.protocol.WriteString(ctx, value)
-}
-
-func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error {
-	return p.protocol.WriteBinary(ctx, value)
-}
-
-// ReadFrame calls underlying THeaderTransport's ReadFrame function.
-func (p *THeaderProtocol) ReadFrame(ctx context.Context) error {
-	return p.transport.ReadFrame(ctx)
-}
-
-func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) {
-	if err = p.transport.ReadFrame(ctx); err != nil {
-		return
-	}
-
-	var newProto TProtocol
-	newProto, err = p.transport.Protocol().GetProtocol(p.transport)
-	if err != nil {
-		var tAppExc TApplicationException
-		if !errors.As(err, &tAppExc) {
-			return
-		}
-		if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil {
-			return
-		}
-		if e := tAppExc.Write(ctx, p.protocol); e != nil {
-			return
-		}
-		if e := p.protocol.WriteMessageEnd(ctx); e != nil {
-			return
-		}
-		if e := p.transport.Flush(ctx); e != nil {
-			return
-		}
-		return
-	}
-	PropagateTConfiguration(newProto, p.cfg)
-	p.protocol = newProto
-
-	return p.protocol.ReadMessageBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error {
-	return p.protocol.ReadMessageEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
-	return p.protocol.ReadStructBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error {
-	return p.protocol.ReadStructEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) {
-	return p.protocol.ReadFieldBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error {
-	return p.protocol.ReadFieldEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
-	return p.protocol.ReadMapBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error {
-	return p.protocol.ReadMapEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
-	return p.protocol.ReadListBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error {
-	return p.protocol.ReadListEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
-	return p.protocol.ReadSetBegin(ctx)
-}
-
-func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error {
-	return p.protocol.ReadSetEnd(ctx)
-}
-
-func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) {
-	return p.protocol.ReadBool(ctx)
-}
-
-func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) {
-	return p.protocol.ReadByte(ctx)
-}
-
-func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) {
-	return p.protocol.ReadI16(ctx)
-}
-
-func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) {
-	return p.protocol.ReadI32(ctx)
-}
-
-func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) {
-	return p.protocol.ReadI64(ctx)
-}
-
-func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
-	return p.protocol.ReadDouble(ctx)
-}
-
-func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) {
-	return p.protocol.ReadString(ctx)
-}
-
-func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
-	return p.protocol.ReadBinary(ctx)
-}
-
-func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error {
-	return p.protocol.Skip(ctx, fieldType)
-}
-
-// SetTConfiguration implements TConfigurationSetter.
-func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) {
-	PropagateTConfiguration(p.transport, cfg)
-	PropagateTConfiguration(p.protocol, cfg)
-	p.cfg = cfg
-}
-
-var (
-	_ TConfigurationSetter = (*tHeaderProtocolFactory)(nil)
-	_ TConfigurationSetter = (*THeaderProtocol)(nil)
-)
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go
deleted file mode 100644
index 6a99535a459..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/header_transport.go
+++ /dev/null
@@ -1,809 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"bufio"
-	"bytes"
-	"compress/zlib"
-	"context"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-)
-
-// Size in bytes for 32-bit ints.
-const size32 = 4
-
-type headerMeta struct {
-	MagicFlags   uint32
-	SequenceID   int32
-	HeaderLength uint16
-}
-
-const headerMetaSize = 10
-
-type clientType int
-
-const (
-	clientUnknown clientType = iota
-	clientHeaders
-	clientFramedBinary
-	clientUnframedBinary
-	clientFramedCompact
-	clientUnframedCompact
-)
-
-// Constants defined in THeader format:
-// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
-const (
-	THeaderHeaderMagic  uint32 = 0x0fff0000
-	THeaderHeaderMask   uint32 = 0xffff0000
-	THeaderFlagsMask    uint32 = 0x0000ffff
-	THeaderMaxFrameSize uint32 = 0x3fffffff
-)
-
-// THeaderMap is the type of the header map in THeader transport.
-type THeaderMap map[string]string
-
-// THeaderProtocolID is the wrapped protocol id used in THeader.
-type THeaderProtocolID int32
-
-// Supported THeaderProtocolID values.
-const (
-	THeaderProtocolBinary  THeaderProtocolID = 0x00
-	THeaderProtocolCompact THeaderProtocolID = 0x02
-	THeaderProtocolDefault                   = THeaderProtocolBinary
-)
-
-// Declared globally to avoid repetitive allocations, not really used.
-var globalMemoryBuffer = NewTMemoryBuffer()
-
-// Validate checks whether the THeaderProtocolID is a valid/supported one.
-func (id THeaderProtocolID) Validate() error {
-	_, err := id.GetProtocol(globalMemoryBuffer)
-	return err
-}
-
-// GetProtocol gets the corresponding TProtocol from the wrapped protocol id.
-func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) {
-	switch id {
-	default:
-		return nil, NewTApplicationException(
-			INVALID_PROTOCOL,
-			fmt.Sprintf("THeader protocol id %d not supported", id),
-		)
-	case THeaderProtocolBinary:
-		return NewTBinaryProtocolTransport(trans), nil
-	case THeaderProtocolCompact:
-		return NewTCompactProtocol(trans), nil
-	}
-}
-
-// THeaderTransformID defines the numeric id of the transform used.
-type THeaderTransformID int32
-
-// THeaderTransformID values.
-//
-// Values not defined here are not currently supported, namely HMAC and Snappy.
-const (
-	TransformNone THeaderTransformID = iota // 0, no special handling
-	TransformZlib                           // 1, zlib
-)
-
-var supportedTransformIDs = map[THeaderTransformID]bool{
-	TransformNone: true,
-	TransformZlib: true,
-}
-
-// TransformReader is an io.ReadCloser that handles transforms reading.
-type TransformReader struct {
-	io.Reader
-
-	closers []io.Closer
-}
-
-var _ io.ReadCloser = (*TransformReader)(nil)
-
-// NewTransformReaderWithCapacity initializes a TransformReader with expected
-// closers capacity.
-//
-// If you don't know the closers capacity beforehand, just use
-//
-//     &TransformReader{Reader: baseReader}
-//
-// instead would be sufficient.
-func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader {
-	return &TransformReader{
-		Reader:  baseReader,
-		closers: make([]io.Closer, 0, capacity),
-	}
-}
-
-// Close calls the underlying closers in appropriate order,
-// stops at and returns the first error encountered.
-func (tr *TransformReader) Close() error {
-	// Call closers in reversed order
-	for i := len(tr.closers) - 1; i >= 0; i-- {
-		if err := tr.closers[i].Close(); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// AddTransform adds a transform.
-func (tr *TransformReader) AddTransform(id THeaderTransformID) error {
-	switch id {
-	default:
-		return NewTApplicationException(
-			INVALID_TRANSFORM,
-			fmt.Sprintf("THeaderTransformID %d not supported", id),
-		)
-	case TransformNone:
-		// no-op
-	case TransformZlib:
-		readCloser, err := zlib.NewReader(tr.Reader)
-		if err != nil {
-			return err
-		}
-		tr.Reader = readCloser
-		tr.closers = append(tr.closers, readCloser)
-	}
-	return nil
-}
-
-// TransformWriter is an io.WriteCloser that handles transforms writing.
-type TransformWriter struct {
-	io.Writer
-
-	closers []io.Closer
-}
-
-var _ io.WriteCloser = (*TransformWriter)(nil)
-
-// NewTransformWriter creates a new TransformWriter with base writer and transforms.
-func NewTransformWriter(baseWriter io.Writer, transforms []THeaderTransformID) (io.WriteCloser, error) {
-	writer := &TransformWriter{
-		Writer:  baseWriter,
-		closers: make([]io.Closer, 0, len(transforms)),
-	}
-	for _, id := range transforms {
-		if err := writer.AddTransform(id); err != nil {
-			return nil, err
-		}
-	}
-	return writer, nil
-}
-
-// Close calls the underlying closers in appropriate order,
-// stops at and returns the first error encountered.
-func (tw *TransformWriter) Close() error {
-	// Call closers in reversed order
-	for i := len(tw.closers) - 1; i >= 0; i-- {
-		if err := tw.closers[i].Close(); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// AddTransform adds a transform.
-func (tw *TransformWriter) AddTransform(id THeaderTransformID) error {
-	switch id {
-	default:
-		return NewTApplicationException(
-			INVALID_TRANSFORM,
-			fmt.Sprintf("THeaderTransformID %d not supported", id),
-		)
-	case TransformNone:
-		// no-op
-	case TransformZlib:
-		writeCloser := zlib.NewWriter(tw.Writer)
-		tw.Writer = writeCloser
-		tw.closers = append(tw.closers, writeCloser)
-	}
-	return nil
-}
-
-// THeaderInfoType is the type id of the info headers.
-type THeaderInfoType int32
-
-// Supported THeaderInfoType values.
-const (
-	_            THeaderInfoType = iota // Skip 0
-	InfoKeyValue                        // 1
-	// Rest of the info types are not supported.
-)
-
-// THeaderTransport is a Transport mode that implements THeader.
-//
-// Note that THeaderTransport handles frame and zlib by itself,
-// so the underlying transport should be a raw socket transports (TSocket or TSSLSocket),
-// instead of rich transports like TZlibTransport or TFramedTransport.
-type THeaderTransport struct {
-	SequenceID int32
-	Flags      uint32
-
-	transport TTransport
-
-	// THeaderMap for read and write
-	readHeaders  THeaderMap
-	writeHeaders THeaderMap
-
-	// Reading related variables.
-	reader *bufio.Reader
-	// When frame is detected, we read the frame fully into frameBuffer.
-	frameBuffer bytes.Buffer
-	// When it's non-nil, Read should read from frameReader instead of
-	// reader, and EOF error indicates end of frame instead of end of all
-	// transport.
-	frameReader io.ReadCloser
-
-	// Writing related variables
-	writeBuffer     bytes.Buffer
-	writeTransforms []THeaderTransformID
-
-	clientType clientType
-	protocolID THeaderProtocolID
-	cfg        *TConfiguration
-
-	// buffer is used in the following scenarios to avoid repetitive
-	// allocations, while 4 is big enough for all those scenarios:
-	//
-	// * header padding (max size 4)
-	// * write the frame size (size 4)
-	buffer [4]byte
-}
-
-var _ TTransport = (*THeaderTransport)(nil)
-
-// Deprecated: Use NewTHeaderTransportConf instead.
-func NewTHeaderTransport(trans TTransport) *THeaderTransport {
-	return NewTHeaderTransportConf(trans, &TConfiguration{
-		noPropagation: true,
-	})
-}
-
-// NewTHeaderTransportConf creates THeaderTransport from the
-// underlying transport, with given TConfiguration attached.
-//
-// If trans is already a *THeaderTransport, it will be returned as is,
-// but with TConfiguration overridden by the value passed in.
-//
-// The protocol ID in TConfiguration is only useful for client transports.
-// For servers,
-// the protocol ID will be overridden again to the one set by the client,
-// to ensure that servers always speak the same dialect as the client.
-func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport {
-	if ht, ok := trans.(*THeaderTransport); ok {
-		ht.SetTConfiguration(conf)
-		return ht
-	}
-	PropagateTConfiguration(trans, conf)
-	return &THeaderTransport{
-		transport:    trans,
-		reader:       bufio.NewReader(trans),
-		writeHeaders: make(THeaderMap),
-		protocolID:   conf.GetTHeaderProtocolID(),
-		cfg:          conf,
-	}
-}
-
-// Open calls the underlying transport's Open function.
-func (t *THeaderTransport) Open() error {
-	return t.transport.Open()
-}
-
-// IsOpen calls the underlying transport's IsOpen function.
-func (t *THeaderTransport) IsOpen() bool {
-	return t.transport.IsOpen()
-}
-
-// ReadFrame tries to read the frame header, guess the client type, and handle
-// unframed clients.
-func (t *THeaderTransport) ReadFrame(ctx context.Context) error {
-	if !t.needReadFrame() {
-		// No need to read frame, skipping.
-		return nil
-	}
-
-	// Peek and handle the first 32 bits.
-	// They could either be the length field of a framed message,
-	// or the first bytes of an unframed message.
-	var buf []byte
-	var err error
-	// This is also usually the first read from a connection,
-	// so handle retries around socket timeouts.
-	_, deadlineSet := ctx.Deadline()
-	for {
-		buf, err = t.reader.Peek(size32)
-		if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
-			// This is I/O timeout and we still have time,
-			// continue trying
-			continue
-		}
-		// For anything else, do not retry
-		break
-	}
-	if err != nil {
-		return err
-	}
-
-	frameSize := binary.BigEndian.Uint32(buf)
-	if frameSize&VERSION_MASK == VERSION_1 {
-		t.clientType = clientUnframedBinary
-		return nil
-	}
-	if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
-		t.clientType = clientUnframedCompact
-		return nil
-	}
-
-	// At this point it should be a framed message,
-	// sanity check on frameSize then discard the peeked part.
-	if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) {
-		return NewTProtocolExceptionWithType(
-			SIZE_LIMIT,
-			errors.New("frame too large"),
-		)
-	}
-	t.reader.Discard(size32)
-
-	// Read the frame fully into frameBuffer.
-	_, err = io.CopyN(&t.frameBuffer, t.reader, int64(frameSize))
-	if err != nil {
-		return err
-	}
-	t.frameReader = io.NopCloser(&t.frameBuffer)
-
-	// Peek and handle the next 32 bits.
-	buf = t.frameBuffer.Bytes()[:size32]
-	version := binary.BigEndian.Uint32(buf)
-	if version&THeaderHeaderMask == THeaderHeaderMagic {
-		t.clientType = clientHeaders
-		return t.parseHeaders(ctx, frameSize)
-	}
-	if version&VERSION_MASK == VERSION_1 {
-		t.clientType = clientFramedBinary
-		return nil
-	}
-	if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
-		t.clientType = clientFramedCompact
-		return nil
-	}
-	if err := t.endOfFrame(); err != nil {
-		return err
-	}
-	return NewTProtocolExceptionWithType(
-		NOT_IMPLEMENTED,
-		errors.New("unsupported client transport type"),
-	)
-}
-
-// endOfFrame does end of frame handling.
-//
-// It closes frameReader, and also resets frame related states.
-func (t *THeaderTransport) endOfFrame() error {
-	defer func() {
-		t.frameBuffer.Reset()
-		t.frameReader = nil
-	}()
-	return t.frameReader.Close()
-}
-
-func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error {
-	if t.clientType != clientHeaders {
-		return nil
-	}
-
-	var err error
-	var meta headerMeta
-	if err = binary.Read(&t.frameBuffer, binary.BigEndian, &meta); err != nil {
-		return err
-	}
-	frameSize -= headerMetaSize
-	t.Flags = meta.MagicFlags & THeaderFlagsMask
-	t.SequenceID = meta.SequenceID
-	headerLength := int64(meta.HeaderLength) * 4
-	if int64(frameSize) < headerLength {
-		return NewTProtocolExceptionWithType(
-			SIZE_LIMIT,
-			errors.New("header size is larger than the whole frame"),
-		)
-	}
-	headerBuf := NewTMemoryBuffer()
-	_, err = io.CopyN(headerBuf, &t.frameBuffer, headerLength)
-	if err != nil {
-		return err
-	}
-	hp := NewTCompactProtocol(headerBuf)
-	hp.SetTConfiguration(t.cfg)
-
-	// At this point the header is already read into headerBuf,
-	// and t.frameBuffer starts from the actual payload.
-	protoID, err := hp.readVarint32()
-	if err != nil {
-		return err
-	}
-	t.protocolID = THeaderProtocolID(protoID)
-
-	var transformCount int32
-	transformCount, err = hp.readVarint32()
-	if err != nil {
-		return err
-	}
-	if transformCount > 0 {
-		reader := NewTransformReaderWithCapacity(
-			&t.frameBuffer,
-			int(transformCount),
-		)
-		t.frameReader = reader
-		transformIDs := make([]THeaderTransformID, transformCount)
-		for i := 0; i < int(transformCount); i++ {
-			id, err := hp.readVarint32()
-			if err != nil {
-				return err
-			}
-			transformIDs[i] = THeaderTransformID(id)
-		}
-		// The transform IDs on the wire was added based on the order of
-		// writing, so on the reading side we need to reverse the order.
-		for i := transformCount - 1; i >= 0; i-- {
-			id := transformIDs[i]
-			if err := reader.AddTransform(id); err != nil {
-				return err
-			}
-		}
-	}
-
-	// The info part does not use the transforms yet, so it's
-	// important to continue using headerBuf.
-	headers := make(THeaderMap)
-	for {
-		infoType, err := hp.readVarint32()
-		if errors.Is(err, io.EOF) {
-			break
-		}
-		if err != nil {
-			return err
-		}
-		if THeaderInfoType(infoType) == InfoKeyValue {
-			count, err := hp.readVarint32()
-			if err != nil {
-				return err
-			}
-			for i := 0; i < int(count); i++ {
-				key, err := hp.ReadString(ctx)
-				if err != nil {
-					return err
-				}
-				value, err := hp.ReadString(ctx)
-				if err != nil {
-					return err
-				}
-				headers[key] = value
-			}
-		} else {
-			// Skip reading info section on the first
-			// unsupported info type.
-			break
-		}
-	}
-	t.readHeaders = headers
-
-	return nil
-}
-
-func (t *THeaderTransport) needReadFrame() bool {
-	if t.clientType == clientUnknown {
-		// This is a new connection that's never read before.
-		return true
-	}
-	if t.isFramed() && t.frameReader == nil {
-		// We just finished the last frame.
-		return true
-	}
-	return false
-}
-
-func (t *THeaderTransport) Read(p []byte) (read int, err error) {
-	// Here using context.Background instead of a context passed in is safe.
-	// First is that there's no way to pass context into this function.
-	// Then, 99% of the case when calling this Read frame is already read
-	// into frameReader. ReadFrame here is more of preventing bugs that
-	// didn't call ReadFrame before calling Read.
-	err = t.ReadFrame(context.Background())
-	if err != nil {
-		return
-	}
-	if t.frameReader != nil {
-		read, err = t.frameReader.Read(p)
-		if err == nil && t.frameBuffer.Len() <= 0 {
-			// the last Read finished the frame, do endOfFrame
-			// handling here.
-			err = t.endOfFrame()
-		} else if err == io.EOF {
-			err = t.endOfFrame()
-			if err != nil {
-				return
-			}
-			if read == 0 {
-				// Try to read the next frame when we hit EOF
-				// (end of frame) immediately.
-				// When we got here, it means the last read
-				// finished the previous frame, but didn't
-				// do endOfFrame handling yet.
-				// We have to read the next frame here,
-				// as otherwise we would return 0 and nil,
-				// which is a case not handled well by most
-				// protocol implementations.
-				return t.Read(p)
-			}
-		}
-		return
-	}
-	return t.reader.Read(p)
-}
-
-// Write writes data to the write buffer.
-//
-// You need to call Flush to actually write them to the transport.
-func (t *THeaderTransport) Write(p []byte) (int, error) {
-	return t.writeBuffer.Write(p)
-}
-
-// Flush writes the appropriate header and the write buffer to the underlying transport.
-func (t *THeaderTransport) Flush(ctx context.Context) error {
-	if t.writeBuffer.Len() == 0 {
-		return nil
-	}
-
-	defer t.writeBuffer.Reset()
-
-	switch t.clientType {
-	default:
-		fallthrough
-	case clientUnknown:
-		t.clientType = clientHeaders
-		fallthrough
-	case clientHeaders:
-		headers := NewTMemoryBuffer()
-		hp := NewTCompactProtocol(headers)
-		hp.SetTConfiguration(t.cfg)
-		if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil {
-			return NewTTransportExceptionFromError(err)
-		}
-		if _, err := hp.writeVarint32(int32(len(t.writeTransforms))); err != nil {
-			return NewTTransportExceptionFromError(err)
-		}
-		for _, transform := range t.writeTransforms {
-			if _, err := hp.writeVarint32(int32(transform)); err != nil {
-				return NewTTransportExceptionFromError(err)
-			}
-		}
-		if len(t.writeHeaders) > 0 {
-			if _, err := hp.writeVarint32(int32(InfoKeyValue)); err != nil {
-				return NewTTransportExceptionFromError(err)
-			}
-			if _, err := hp.writeVarint32(int32(len(t.writeHeaders))); err != nil {
-				return NewTTransportExceptionFromError(err)
-			}
-			for key, value := range t.writeHeaders {
-				if err := hp.WriteString(ctx, key); err != nil {
-					return NewTTransportExceptionFromError(err)
-				}
-				if err := hp.WriteString(ctx, value); err != nil {
-					return NewTTransportExceptionFromError(err)
-				}
-			}
-		}
-		padding := 4 - headers.Len()%4
-		if padding < 4 {
-			buf := t.buffer[:padding]
-			for i := range buf {
-				buf[i] = 0
-			}
-			if _, err := headers.Write(buf); err != nil {
-				return NewTTransportExceptionFromError(err)
-			}
-		}
-
-		var payload bytes.Buffer
-		meta := headerMeta{
-			MagicFlags:   THeaderHeaderMagic + t.Flags&THeaderFlagsMask,
-			SequenceID:   t.SequenceID,
-			HeaderLength: uint16(headers.Len() / 4),
-		}
-		if err := binary.Write(&payload, binary.BigEndian, meta); err != nil {
-			return NewTTransportExceptionFromError(err)
-		}
-		if _, err := io.Copy(&payload, headers); err != nil {
-			return NewTTransportExceptionFromError(err)
-		}
-
-		writer, err := NewTransformWriter(&payload, t.writeTransforms)
-		if err != nil {
-			return NewTTransportExceptionFromError(err)
-		}
-		if _, err := io.Copy(writer, &t.writeBuffer); err != nil {
-			return NewTTransportExceptionFromError(err)
-		}
-		if err := writer.Close(); err != nil {
-			return NewTTransportExceptionFromError(err)
-		}
-
-		// First write frame length
-		buf := t.buffer[:size32]
-		binary.BigEndian.PutUint32(buf, uint32(payload.Len()))
-		if _, err := t.transport.Write(buf); err != nil {
-			return NewTTransportExceptionFromError(err)
-		}
-		// Then write the payload
-		if _, err := io.Copy(t.transport, &payload); err != nil {
-			return NewTTransportExceptionFromError(err)
-		}
-
-	case clientFramedBinary, clientFramedCompact:
-		buf := t.buffer[:size32]
-		binary.BigEndian.PutUint32(buf, uint32(t.writeBuffer.Len()))
-		if _, err := t.transport.Write(buf); err != nil {
-			return NewTTransportExceptionFromError(err)
-		}
-		fallthrough
-	case clientUnframedBinary, clientUnframedCompact:
-		if _, err := io.Copy(t.transport, &t.writeBuffer); err != nil {
-			return NewTTransportExceptionFromError(err)
-		}
-	}
-
-	select {
-	default:
-	case <-ctx.Done():
-		return NewTTransportExceptionFromError(ctx.Err())
-	}
-
-	return t.transport.Flush(ctx)
-}
-
-// Close closes the transport, along with its underlying transport.
-func (t *THeaderTransport) Close() error {
-	if err := t.Flush(context.Background()); err != nil {
-		return err
-	}
-	return t.transport.Close()
-}
-
-// RemainingBytes calls underlying transport's RemainingBytes.
-//
-// Even in framed cases, because of all the possible compression transforms
-// involved, the remaining frame size is likely to be different from the actual
-// remaining readable bytes, so we don't bother to keep tracking the remaining
-// frame size by ourselves and just use the underlying transport's
-// RemainingBytes directly.
-func (t *THeaderTransport) RemainingBytes() uint64 {
-	return t.transport.RemainingBytes()
-}
-
-// GetReadHeaders returns the THeaderMap read from transport.
-func (t *THeaderTransport) GetReadHeaders() THeaderMap {
-	return t.readHeaders
-}
-
-// SetWriteHeader sets a header for write.
-func (t *THeaderTransport) SetWriteHeader(key, value string) {
-	t.writeHeaders[key] = value
-}
-
-// ClearWriteHeaders clears all write headers previously set.
-func (t *THeaderTransport) ClearWriteHeaders() {
-	t.writeHeaders = make(THeaderMap)
-}
-
-// AddTransform add a transform for writing.
-func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error {
-	if !supportedTransformIDs[transform] {
-		return NewTProtocolExceptionWithType(
-			NOT_IMPLEMENTED,
-			fmt.Errorf("THeaderTransformID %d not supported", transform),
-		)
-	}
-	t.writeTransforms = append(t.writeTransforms, transform)
-	return nil
-}
-
-// Protocol returns the wrapped protocol id used in this THeaderTransport.
-func (t *THeaderTransport) Protocol() THeaderProtocolID {
-	switch t.clientType {
-	default:
-		return t.protocolID
-	case clientFramedBinary, clientUnframedBinary:
-		return THeaderProtocolBinary
-	case clientFramedCompact, clientUnframedCompact:
-		return THeaderProtocolCompact
-	}
-}
-
-func (t *THeaderTransport) isFramed() bool {
-	switch t.clientType {
-	default:
-		return false
-	case clientHeaders, clientFramedBinary, clientFramedCompact:
-		return true
-	}
-}
-
-// SetTConfiguration implements TConfigurationSetter.
-func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) {
-	PropagateTConfiguration(t.transport, cfg)
-	t.cfg = cfg
-}
-
-// THeaderTransportFactory is a TTransportFactory implementation to create
-// THeaderTransport.
-//
-// It also implements TConfigurationSetter.
-type THeaderTransportFactory struct {
-	// The underlying factory, could be nil.
-	Factory TTransportFactory
-
-	cfg *TConfiguration
-}
-
-// Deprecated: Use NewTHeaderTransportFactoryConf instead.
-func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory {
-	return NewTHeaderTransportFactoryConf(factory, &TConfiguration{
-		noPropagation: true,
-	})
-}
-
-// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with
-// the given *TConfiguration.
-func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory {
-	return &THeaderTransportFactory{
-		Factory: factory,
-
-		cfg: conf,
-	}
-}
-
-// GetTransport implements TTransportFactory.
-func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
-	if f.Factory != nil {
-		t, err := f.Factory.GetTransport(trans)
-		if err != nil {
-			return nil, err
-		}
-		return NewTHeaderTransportConf(t, f.cfg), nil
-	}
-	return NewTHeaderTransportConf(trans, f.cfg), nil
-}
-
-// SetTConfiguration implements TConfigurationSetter.
-func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) {
-	PropagateTConfiguration(f.Factory, f.cfg)
-	f.cfg = cfg
-}
-
-var (
-	_ TConfigurationSetter = (*THeaderTransportFactory)(nil)
-	_ TConfigurationSetter = (*THeaderTransport)(nil)
-)
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go
deleted file mode 100644
index 9a2cc98cc76..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_client.go
+++ /dev/null
@@ -1,257 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"bytes"
-	"context"
-	"errors"
-	"io"
-	"net/http"
-	"net/url"
-	"strconv"
-)
-
-// Default to using the shared http client. Library users are
-// free to change this global client or specify one through
-// THttpClientOptions.
-var DefaultHttpClient *http.Client = http.DefaultClient
-
-type THttpClient struct {
-	client             *http.Client
-	response           *http.Response
-	url                *url.URL
-	requestBuffer      *bytes.Buffer
-	header             http.Header
-	nsecConnectTimeout int64
-	nsecReadTimeout    int64
-}
-
-type THttpClientTransportFactory struct {
-	options THttpClientOptions
-	url     string
-}
-
-func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
-	if trans != nil {
-		t, ok := trans.(*THttpClient)
-		if ok && t.url != nil {
-			return NewTHttpClientWithOptions(t.url.String(), p.options)
-		}
-	}
-	return NewTHttpClientWithOptions(p.url, p.options)
-}
-
-type THttpClientOptions struct {
-	// If nil, DefaultHttpClient is used
-	Client *http.Client
-}
-
-func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory {
-	return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
-}
-
-func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
-	return &THttpClientTransportFactory{url: url, options: options}
-}
-
-func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
-	parsedURL, err := url.Parse(urlstr)
-	if err != nil {
-		return nil, err
-	}
-	buf := make([]byte, 0, 1024)
-	client := options.Client
-	if client == nil {
-		client = DefaultHttpClient
-	}
-	httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}}
-	return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil
-}
-
-func NewTHttpClient(urlstr string) (TTransport, error) {
-	return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
-}
-
-// Set the HTTP Header for this specific Thrift Transport
-// It is important that you first assert the TTransport as a THttpClient type
-// like so:
-//
-// httpTrans := trans.(THttpClient)
-// httpTrans.SetHeader("User-Agent","Thrift Client 1.0")
-func (p *THttpClient) SetHeader(key string, value string) {
-	p.header.Add(key, value)
-}
-
-// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport
-// It is important that you first assert the TTransport as a THttpClient type
-// like so:
-//
-// httpTrans := trans.(THttpClient)
-// hdrValue := httpTrans.GetHeader("User-Agent")
-func (p *THttpClient) GetHeader(key string) string {
-	return p.header.Get(key)
-}
-
-// Deletes the HTTP Header given a Header Key for this specific Thrift Transport
-// It is important that you first assert the TTransport as a THttpClient type
-// like so:
-//
-// httpTrans := trans.(THttpClient)
-// httpTrans.DelHeader("User-Agent")
-func (p *THttpClient) DelHeader(key string) {
-	p.header.Del(key)
-}
-
-func (p *THttpClient) Open() error {
-	// do nothing
-	return nil
-}
-
-func (p *THttpClient) IsOpen() bool {
-	return p.response != nil || p.requestBuffer != nil
-}
-
-func (p *THttpClient) closeResponse() error {
-	var err error
-	if p.response != nil && p.response.Body != nil {
-		// The docs specify that if keepalive is enabled and the response body is not
-		// read to completion the connection will never be returned to the pool and
-		// reused. Errors are being ignored here because if the connection is invalid
-		// and this fails for some reason, the Close() method will do any remaining
-		// cleanup.
-		io.Copy(io.Discard, p.response.Body)
-
-		err = p.response.Body.Close()
-	}
-
-	p.response = nil
-	return err
-}
-
-func (p *THttpClient) Close() error {
-	if p.requestBuffer != nil {
-		p.requestBuffer.Reset()
-		p.requestBuffer = nil
-	}
-	return p.closeResponse()
-}
-
-func (p *THttpClient) Read(buf []byte) (int, error) {
-	if p.response == nil {
-		return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
-	}
-	n, err := p.response.Body.Read(buf)
-	if n > 0 && (err == nil || errors.Is(err, io.EOF)) {
-		return n, nil
-	}
-	return n, NewTTransportExceptionFromError(err)
-}
-
-func (p *THttpClient) ReadByte() (c byte, err error) {
-	if p.response == nil {
-		return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
-	}
-	return readByte(p.response.Body)
-}
-
-func (p *THttpClient) Write(buf []byte) (int, error) {
-	if p.requestBuffer == nil {
-		return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.")
-	}
-	return p.requestBuffer.Write(buf)
-}
-
-func (p *THttpClient) WriteByte(c byte) error {
-	if p.requestBuffer == nil {
-		return NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.")
-	}
-	return p.requestBuffer.WriteByte(c)
-}
-
-func (p *THttpClient) WriteString(s string) (n int, err error) {
-	if p.requestBuffer == nil {
-		return 0, NewTTransportException(NOT_OPEN, "Request buffer is nil, connection may have been closed.")
-	}
-	return p.requestBuffer.WriteString(s)
-}
-
-func (p *THttpClient) Flush(ctx context.Context) error {
-	// Close any previous response body to avoid leaking connections.
-	p.closeResponse()
-
-	// Give up the ownership of the current request buffer to http request,
-	// and create a new buffer for the next request.
-	buf := p.requestBuffer
-	p.requestBuffer = new(bytes.Buffer)
-	req, err := http.NewRequest("POST", p.url.String(), buf)
-	if err != nil {
-		return NewTTransportExceptionFromError(err)
-	}
-	req.Header = p.header
-	if ctx != nil {
-		req = req.WithContext(ctx)
-	}
-	response, err := p.client.Do(req)
-	if err != nil {
-		return NewTTransportExceptionFromError(err)
-	}
-	if response.StatusCode != http.StatusOK {
-		// Close the response to avoid leaking file descriptors. closeResponse does
-		// more than just call Close(), so temporarily assign it and reuse the logic.
-		p.response = response
-		p.closeResponse()
-
-		// TODO(pomack) log bad response
-		return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode))
-	}
-	p.response = response
-	return nil
-}
-
-func (p *THttpClient) RemainingBytes() (num_bytes uint64) {
-	len := p.response.ContentLength
-	if len >= 0 {
-		return uint64(len)
-	}
-
-	const maxSize = ^uint64(0)
-	return maxSize // the truth is, we just don't know unless framed is used
-}
-
-// Deprecated: Use NewTHttpClientTransportFactory instead.
-func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory {
-	return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
-}
-
-// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead.
-func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
-	return NewTHttpClientTransportFactoryWithOptions(url, options)
-}
-
-// Deprecated: Use NewTHttpClientWithOptions instead.
-func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
-	return NewTHttpClientWithOptions(urlstr, options)
-}
-
-// Deprecated: Use NewTHttpClient instead.
-func NewTHttpPostClient(urlstr string) (TTransport, error) {
-	return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go
deleted file mode 100644
index bc6922762ad..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/http_transport.go
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"compress/gzip"
-	"io"
-	"net/http"
-	"strings"
-	"sync"
-)
-
-// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function
-func NewThriftHandlerFunc(processor TProcessor,
-	inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) {
-
-	return gz(func(w http.ResponseWriter, r *http.Request) {
-		w.Header().Add("Content-Type", "application/x-thrift")
-
-		transport := NewStreamTransport(r.Body, w)
-		processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport))
-	})
-}
-
-// gz transparently compresses the HTTP response if the client supports it.
-func gz(handler http.HandlerFunc) http.HandlerFunc {
-	sp := &sync.Pool{
-		New: func() interface{} {
-			return gzip.NewWriter(nil)
-		},
-	}
-
-	return func(w http.ResponseWriter, r *http.Request) {
-		if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
-			handler(w, r)
-			return
-		}
-		w.Header().Set("Content-Encoding", "gzip")
-		gz := sp.Get().(*gzip.Writer)
-		gz.Reset(w)
-		defer func() {
-			_ = gz.Close()
-			sp.Put(gz)
-		}()
-		gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w}
-		handler(gzw, r)
-	}
-}
-
-type gzipResponseWriter struct {
-	io.Writer
-	http.ResponseWriter
-}
-
-func (w gzipResponseWriter) Write(b []byte) (int, error) {
-	return w.Writer.Write(b)
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go
deleted file mode 100644
index 1c477990fea..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/iostream_transport.go
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"bufio"
-	"context"
-	"io"
-)
-
-// StreamTransport is a Transport made of an io.Reader and/or an io.Writer
-type StreamTransport struct {
-	io.Reader
-	io.Writer
-	isReadWriter bool
-	closed       bool
-}
-
-type StreamTransportFactory struct {
-	Reader       io.Reader
-	Writer       io.Writer
-	isReadWriter bool
-}
-
-func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
-	if trans != nil {
-		t, ok := trans.(*StreamTransport)
-		if ok {
-			if t.isReadWriter {
-				return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil
-			}
-			if t.Reader != nil && t.Writer != nil {
-				return NewStreamTransport(t.Reader, t.Writer), nil
-			}
-			if t.Reader != nil && t.Writer == nil {
-				return NewStreamTransportR(t.Reader), nil
-			}
-			if t.Reader == nil && t.Writer != nil {
-				return NewStreamTransportW(t.Writer), nil
-			}
-			return &StreamTransport{}, nil
-		}
-	}
-	if p.isReadWriter {
-		return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil
-	}
-	if p.Reader != nil && p.Writer != nil {
-		return NewStreamTransport(p.Reader, p.Writer), nil
-	}
-	if p.Reader != nil && p.Writer == nil {
-		return NewStreamTransportR(p.Reader), nil
-	}
-	if p.Reader == nil && p.Writer != nil {
-		return NewStreamTransportW(p.Writer), nil
-	}
-	return &StreamTransport{}, nil
-}
-
-func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory {
-	return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter}
-}
-
-func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport {
-	return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)}
-}
-
-func NewStreamTransportR(r io.Reader) *StreamTransport {
-	return &StreamTransport{Reader: bufio.NewReader(r)}
-}
-
-func NewStreamTransportW(w io.Writer) *StreamTransport {
-	return &StreamTransport{Writer: bufio.NewWriter(w)}
-}
-
-func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport {
-	bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw))
-	return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true}
-}
-
-func (p *StreamTransport) IsOpen() bool {
-	return !p.closed
-}
-
-// implicitly opened on creation, can't be reopened once closed
-func (p *StreamTransport) Open() error {
-	if !p.closed {
-		return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.")
-	} else {
-		return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.")
-	}
-}
-
-// Closes both the input and output streams.
-func (p *StreamTransport) Close() error {
-	if p.closed {
-		return NewTTransportException(NOT_OPEN, "StreamTransport already closed.")
-	}
-	p.closed = true
-	closedReader := false
-	if p.Reader != nil {
-		c, ok := p.Reader.(io.Closer)
-		if ok {
-			e := c.Close()
-			closedReader = true
-			if e != nil {
-				return e
-			}
-		}
-		p.Reader = nil
-	}
-	if p.Writer != nil && (!closedReader || !p.isReadWriter) {
-		c, ok := p.Writer.(io.Closer)
-		if ok {
-			e := c.Close()
-			if e != nil {
-				return e
-			}
-		}
-		p.Writer = nil
-	}
-	return nil
-}
-
-// Flushes the underlying output stream if not null.
-func (p *StreamTransport) Flush(ctx context.Context) error {
-	if p.Writer == nil {
-		return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream")
-	}
-	f, ok := p.Writer.(Flusher)
-	if ok {
-		err := f.Flush()
-		if err != nil {
-			return NewTTransportExceptionFromError(err)
-		}
-	}
-	return nil
-}
-
-func (p *StreamTransport) Read(c []byte) (n int, err error) {
-	n, err = p.Reader.Read(c)
-	if err != nil {
-		err = NewTTransportExceptionFromError(err)
-	}
-	return
-}
-
-func (p *StreamTransport) ReadByte() (c byte, err error) {
-	f, ok := p.Reader.(io.ByteReader)
-	if ok {
-		c, err = f.ReadByte()
-	} else {
-		c, err = readByte(p.Reader)
-	}
-	if err != nil {
-		err = NewTTransportExceptionFromError(err)
-	}
-	return
-}
-
-func (p *StreamTransport) Write(c []byte) (n int, err error) {
-	n, err = p.Writer.Write(c)
-	if err != nil {
-		err = NewTTransportExceptionFromError(err)
-	}
-	return
-}
-
-func (p *StreamTransport) WriteByte(c byte) (err error) {
-	f, ok := p.Writer.(io.ByteWriter)
-	if ok {
-		err = f.WriteByte(c)
-	} else {
-		err = writeByte(p.Writer, c)
-	}
-	if err != nil {
-		err = NewTTransportExceptionFromError(err)
-	}
-	return
-}
-
-func (p *StreamTransport) WriteString(s string) (n int, err error) {
-	f, ok := p.Writer.(stringWriter)
-	if ok {
-		n, err = f.WriteString(s)
-	} else {
-		n, err = p.Writer.Write([]byte(s))
-	}
-	if err != nil {
-		err = NewTTransportExceptionFromError(err)
-	}
-	return
-}
-
-func (p *StreamTransport) RemainingBytes() (num_bytes uint64) {
-	const maxSize = ^uint64(0)
-	return maxSize // the truth is, we just don't know unless framed is used
-}
-
-// SetTConfiguration implements TConfigurationSetter for propagation.
-func (p *StreamTransport) SetTConfiguration(conf *TConfiguration) {
-	PropagateTConfiguration(p.Reader, conf)
-	PropagateTConfiguration(p.Writer, conf)
-}
-
-var _ TConfigurationSetter = (*StreamTransport)(nil)
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go
deleted file mode 100644
index 8e59d16cfda..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/json_protocol.go
+++ /dev/null
@@ -1,591 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"context"
-	"encoding/base64"
-	"fmt"
-)
-
-const (
-	THRIFT_JSON_PROTOCOL_VERSION = 1
-)
-
-// for references to _ParseContext see tsimplejson_protocol.go
-
-// JSON protocol implementation for thrift.
-// Utilizes Simple JSON protocol
-//
-type TJSONProtocol struct {
-	*TSimpleJSONProtocol
-}
-
-// Constructor
-func NewTJSONProtocol(t TTransport) *TJSONProtocol {
-	v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)}
-	v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL)
-	v.dumpContext.push(_CONTEXT_IN_TOPLEVEL)
-	return v
-}
-
-// Factory
-type TJSONProtocolFactory struct{}
-
-func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
-	return NewTJSONProtocol(trans)
-}
-
-func NewTJSONProtocolFactory() *TJSONProtocolFactory {
-	return &TJSONProtocolFactory{}
-}
-
-func (p *TJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
-	p.resetContextStack() // THRIFT-3735
-	if e := p.OutputListBegin(); e != nil {
-		return e
-	}
-	if e := p.WriteI32(ctx, THRIFT_JSON_PROTOCOL_VERSION); e != nil {
-		return e
-	}
-	if e := p.WriteString(ctx, name); e != nil {
-		return e
-	}
-	if e := p.WriteByte(ctx, int8(typeId)); e != nil {
-		return e
-	}
-	if e := p.WriteI32(ctx, seqId); e != nil {
-		return e
-	}
-	return nil
-}
-
-func (p *TJSONProtocol) WriteMessageEnd(ctx context.Context) error {
-	return p.OutputListEnd()
-}
-
-func (p *TJSONProtocol) WriteStructBegin(ctx context.Context, name string) error {
-	if e := p.OutputObjectBegin(); e != nil {
-		return e
-	}
-	return nil
-}
-
-func (p *TJSONProtocol) WriteStructEnd(ctx context.Context) error {
-	return p.OutputObjectEnd()
-}
-
-func (p *TJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
-	if e := p.WriteI16(ctx, id); e != nil {
-		return e
-	}
-	if e := p.OutputObjectBegin(); e != nil {
-		return e
-	}
-	s, e1 := p.TypeIdToString(typeId)
-	if e1 != nil {
-		return e1
-	}
-	if e := p.WriteString(ctx, s); e != nil {
-		return e
-	}
-	return nil
-}
-
-func (p *TJSONProtocol) WriteFieldEnd(ctx context.Context) error {
-	return p.OutputObjectEnd()
-}
-
-func (p *TJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil }
-
-func (p *TJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
-	if e := p.OutputListBegin(); e != nil {
-		return e
-	}
-	s, e1 := p.TypeIdToString(keyType)
-	if e1 != nil {
-		return e1
-	}
-	if e := p.WriteString(ctx, s); e != nil {
-		return e
-	}
-	s, e1 = p.TypeIdToString(valueType)
-	if e1 != nil {
-		return e1
-	}
-	if e := p.WriteString(ctx, s); e != nil {
-		return e
-	}
-	if e := p.WriteI64(ctx, int64(size)); e != nil {
-		return e
-	}
-	return p.OutputObjectBegin()
-}
-
-func (p *TJSONProtocol) WriteMapEnd(ctx context.Context) error {
-	if e := p.OutputObjectEnd(); e != nil {
-		return e
-	}
-	return p.OutputListEnd()
-}
-
-func (p *TJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
-	return p.OutputElemListBegin(elemType, size)
-}
-
-func (p *TJSONProtocol) WriteListEnd(ctx context.Context) error {
-	return p.OutputListEnd()
-}
-
-func (p *TJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
-	return p.OutputElemListBegin(elemType, size)
-}
-
-func (p *TJSONProtocol) WriteSetEnd(ctx context.Context) error {
-	return p.OutputListEnd()
-}
-
-func (p *TJSONProtocol) WriteBool(ctx context.Context, b bool) error {
-	if b {
-		return p.WriteI32(ctx, 1)
-	}
-	return p.WriteI32(ctx, 0)
-}
-
-func (p *TJSONProtocol) WriteByte(ctx context.Context, b int8) error {
-	return p.WriteI32(ctx, int32(b))
-}
-
-func (p *TJSONProtocol) WriteI16(ctx context.Context, v int16) error {
-	return p.WriteI32(ctx, int32(v))
-}
-
-func (p *TJSONProtocol) WriteI32(ctx context.Context, v int32) error {
-	return p.OutputI64(int64(v))
-}
-
-func (p *TJSONProtocol) WriteI64(ctx context.Context, v int64) error {
-	return p.OutputI64(int64(v))
-}
-
-func (p *TJSONProtocol) WriteDouble(ctx context.Context, v float64) error {
-	return p.OutputF64(v)
-}
-
-func (p *TJSONProtocol) WriteString(ctx context.Context, v string) error {
-	return p.OutputString(v)
-}
-
-func (p *TJSONProtocol) WriteBinary(ctx context.Context, v []byte) error {
-	// JSON library only takes in a string,
-	// not an arbitrary byte array, to ensure bytes are transmitted
-	// efficiently we must convert this into a valid JSON string
-	// therefore we use base64 encoding to avoid excessive escaping/quoting
-	if e := p.OutputPreValue(); e != nil {
-		return e
-	}
-	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
-		return NewTProtocolException(e)
-	}
-	writer := base64.NewEncoder(base64.StdEncoding, p.writer)
-	if _, e := writer.Write(v); e != nil {
-		p.writer.Reset(p.trans) // THRIFT-3735
-		return NewTProtocolException(e)
-	}
-	if e := writer.Close(); e != nil {
-		return NewTProtocolException(e)
-	}
-	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
-		return NewTProtocolException(e)
-	}
-	return p.OutputPostValue()
-}
-
-// Reading methods.
-func (p *TJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
-	p.resetContextStack() // THRIFT-3735
-	if isNull, err := p.ParseListBegin(); isNull || err != nil {
-		return name, typeId, seqId, err
-	}
-	version, err := p.ReadI32(ctx)
-	if err != nil {
-		return name, typeId, seqId, err
-	}
-	if version != THRIFT_JSON_PROTOCOL_VERSION {
-		e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION)
-		return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e)
-
-	}
-	if name, err = p.ReadString(ctx); err != nil {
-		return name, typeId, seqId, err
-	}
-	bTypeId, err := p.ReadByte(ctx)
-	typeId = TMessageType(bTypeId)
-	if err != nil {
-		return name, typeId, seqId, err
-	}
-	if seqId, err = p.ReadI32(ctx); err != nil {
-		return name, typeId, seqId, err
-	}
-	return name, typeId, seqId, nil
-}
-
-func (p *TJSONProtocol) ReadMessageEnd(ctx context.Context) error {
-	err := p.ParseListEnd()
-	return err
-}
-
-func (p *TJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
-	_, err = p.ParseObjectStart()
-	return "", err
-}
-
-func (p *TJSONProtocol) ReadStructEnd(ctx context.Context) error {
-	return p.ParseObjectEnd()
-}
-
-func (p *TJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) {
-	b, _ := p.reader.Peek(1)
-	if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] {
-		return "", STOP, -1, nil
-	}
-	fieldId, err := p.ReadI16(ctx)
-	if err != nil {
-		return "", STOP, fieldId, err
-	}
-	if _, err = p.ParseObjectStart(); err != nil {
-		return "", STOP, fieldId, err
-	}
-	sType, err := p.ReadString(ctx)
-	if err != nil {
-		return "", STOP, fieldId, err
-	}
-	fType, err := p.StringToTypeId(sType)
-	return "", fType, fieldId, err
-}
-
-func (p *TJSONProtocol) ReadFieldEnd(ctx context.Context) error {
-	return p.ParseObjectEnd()
-}
-
-func (p *TJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) {
-	if isNull, e := p.ParseListBegin(); isNull || e != nil {
-		return VOID, VOID, 0, e
-	}
-
-	// read keyType
-	sKeyType, e := p.ReadString(ctx)
-	if e != nil {
-		return keyType, valueType, size, e
-	}
-	keyType, e = p.StringToTypeId(sKeyType)
-	if e != nil {
-		return keyType, valueType, size, e
-	}
-
-	// read valueType
-	sValueType, e := p.ReadString(ctx)
-	if e != nil {
-		return keyType, valueType, size, e
-	}
-	valueType, e = p.StringToTypeId(sValueType)
-	if e != nil {
-		return keyType, valueType, size, e
-	}
-
-	// read size
-	iSize, e := p.ReadI64(ctx)
-	if e != nil {
-		return keyType, valueType, size, e
-	}
-	size = int(iSize)
-
-	_, e = p.ParseObjectStart()
-	return keyType, valueType, size, e
-}
-
-func (p *TJSONProtocol) ReadMapEnd(ctx context.Context) error {
-	e := p.ParseObjectEnd()
-	if e != nil {
-		return e
-	}
-	return p.ParseListEnd()
-}
-
-func (p *TJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) {
-	return p.ParseElemListBegin()
-}
-
-func (p *TJSONProtocol) ReadListEnd(ctx context.Context) error {
-	return p.ParseListEnd()
-}
-
-func (p *TJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) {
-	return p.ParseElemListBegin()
-}
-
-func (p *TJSONProtocol) ReadSetEnd(ctx context.Context) error {
-	return p.ParseListEnd()
-}
-
-func (p *TJSONProtocol) ReadBool(ctx context.Context) (bool, error) {
-	value, err := p.ReadI32(ctx)
-	return (value != 0), err
-}
-
-func (p *TJSONProtocol) ReadByte(ctx context.Context) (int8, error) {
-	v, err := p.ReadI64(ctx)
-	return int8(v), err
-}
-
-func (p *TJSONProtocol) ReadI16(ctx context.Context) (int16, error) {
-	v, err := p.ReadI64(ctx)
-	return int16(v), err
-}
-
-func (p *TJSONProtocol) ReadI32(ctx context.Context) (int32, error) {
-	v, err := p.ReadI64(ctx)
-	return int32(v), err
-}
-
-func (p *TJSONProtocol) ReadI64(ctx context.Context) (int64, error) {
-	v, _, err := p.ParseI64()
-	return v, err
-}
-
-func (p *TJSONProtocol) ReadDouble(ctx context.Context) (float64, error) {
-	v, _, err := p.ParseF64()
-	return v, err
-}
-
-func (p *TJSONProtocol) ReadString(ctx context.Context) (string, error) {
-	var v string
-	if err := p.ParsePreValue(); err != nil {
-		return v, err
-	}
-	f, _ := p.reader.Peek(1)
-	if len(f) > 0 && f[0] == JSON_QUOTE {
-		p.reader.ReadByte()
-		value, err := p.ParseStringBody()
-		v = value
-		if err != nil {
-			return v, err
-		}
-	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
-		b := make([]byte, len(JSON_NULL))
-		_, err := p.reader.Read(b)
-		if err != nil {
-			return v, NewTProtocolException(err)
-		}
-		if string(b) != string(JSON_NULL) {
-			e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
-			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
-		}
-	} else {
-		e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
-		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
-	}
-	return v, p.ParsePostValue()
-}
-
-func (p *TJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
-	var v []byte
-	if err := p.ParsePreValue(); err != nil {
-		return nil, err
-	}
-	f, _ := p.reader.Peek(1)
-	if len(f) > 0 && f[0] == JSON_QUOTE {
-		p.reader.ReadByte()
-		value, err := p.ParseBase64EncodedBody()
-		v = value
-		if err != nil {
-			return v, err
-		}
-	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
-		b := make([]byte, len(JSON_NULL))
-		_, err := p.reader.Read(b)
-		if err != nil {
-			return v, NewTProtocolException(err)
-		}
-		if string(b) != string(JSON_NULL) {
-			e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
-			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
-		}
-	} else {
-		e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
-		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
-	}
-
-	return v, p.ParsePostValue()
-}
-
-func (p *TJSONProtocol) Flush(ctx context.Context) (err error) {
-	err = p.writer.Flush()
-	if err == nil {
-		err = p.trans.Flush(ctx)
-	}
-	return NewTProtocolException(err)
-}
-
-func (p *TJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
-	return SkipDefaultDepth(ctx, p, fieldType)
-}
-
-func (p *TJSONProtocol) Transport() TTransport {
-	return p.trans
-}
-
-func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
-	if e := p.OutputListBegin(); e != nil {
-		return e
-	}
-	s, e1 := p.TypeIdToString(elemType)
-	if e1 != nil {
-		return e1
-	}
-	if e := p.OutputString(s); e != nil {
-		return e
-	}
-	if e := p.OutputI64(int64(size)); e != nil {
-		return e
-	}
-	return nil
-}
-
-func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
-	if isNull, e := p.ParseListBegin(); isNull || e != nil {
-		return VOID, 0, e
-	}
-	// We don't really use the ctx in ReadString implementation,
-	// so this is safe for now.
-	// We might want to add context to ParseElemListBegin if we start to use
-	// ctx in ReadString implementation in the future.
-	sElemType, err := p.ReadString(context.Background())
-	if err != nil {
-		return VOID, size, err
-	}
-	elemType, err = p.StringToTypeId(sElemType)
-	if err != nil {
-		return elemType, size, err
-	}
-	nSize, _, err2 := p.ParseI64()
-	size = int(nSize)
-	return elemType, size, err2
-}
-
-func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) {
-	if isNull, e := p.ParseListBegin(); isNull || e != nil {
-		return VOID, 0, e
-	}
-	// We don't really use the ctx in ReadString implementation,
-	// so this is safe for now.
-	// We might want to add context to ParseElemListBegin if we start to use
-	// ctx in ReadString implementation in the future.
-	sElemType, err := p.ReadString(context.Background())
-	if err != nil {
-		return VOID, size, err
-	}
-	elemType, err = p.StringToTypeId(sElemType)
-	if err != nil {
-		return elemType, size, err
-	}
-	nSize, _, err2 := p.ParseI64()
-	size = int(nSize)
-	return elemType, size, err2
-}
-
-func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error {
-	if e := p.OutputListBegin(); e != nil {
-		return e
-	}
-	s, e1 := p.TypeIdToString(elemType)
-	if e1 != nil {
-		return e1
-	}
-	if e := p.OutputString(s); e != nil {
-		return e
-	}
-	if e := p.OutputI64(int64(size)); e != nil {
-		return e
-	}
-	return nil
-}
-
-func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) {
-	switch byte(fieldType) {
-	case BOOL:
-		return "tf", nil
-	case BYTE:
-		return "i8", nil
-	case I16:
-		return "i16", nil
-	case I32:
-		return "i32", nil
-	case I64:
-		return "i64", nil
-	case DOUBLE:
-		return "dbl", nil
-	case STRING:
-		return "str", nil
-	case STRUCT:
-		return "rec", nil
-	case MAP:
-		return "map", nil
-	case SET:
-		return "set", nil
-	case LIST:
-		return "lst", nil
-	}
-
-	e := fmt.Errorf("Unknown fieldType: %d", int(fieldType))
-	return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
-}
-
-func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) {
-	switch fieldType {
-	case "tf":
-		return TType(BOOL), nil
-	case "i8":
-		return TType(BYTE), nil
-	case "i16":
-		return TType(I16), nil
-	case "i32":
-		return TType(I32), nil
-	case "i64":
-		return TType(I64), nil
-	case "dbl":
-		return TType(DOUBLE), nil
-	case "str":
-		return TType(STRING), nil
-	case "rec":
-		return TType(STRUCT), nil
-	case "map":
-		return TType(MAP), nil
-	case "set":
-		return TType(SET), nil
-	case "lst":
-		return TType(LIST), nil
-	}
-
-	e := fmt.Errorf("Unknown type identifier: %s", fieldType)
-	return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e)
-}
-
-var _ TConfigurationSetter = (*TJSONProtocol)(nil)
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go
deleted file mode 100644
index c42aac998b7..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/logger.go
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"log"
-	"os"
-	"testing"
-)
-
-// Logger is a simple wrapper of a logging function.
-//
-// In reality the users might actually use different logging libraries, and they
-// are not always compatible with each other.
-//
-// Logger is meant to be a simple common ground that it's easy to wrap whatever
-// logging library they use into.
-//
-// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design
-// discussion behind it.
-type Logger func(msg string)
-
-// NopLogger is a Logger implementation that does nothing.
-func NopLogger(msg string) {}
-
-// StdLogger wraps stdlib log package into a Logger.
-//
-// If logger passed in is nil, it will fallback to use stderr and default flags.
-func StdLogger(logger *log.Logger) Logger {
-	if logger == nil {
-		logger = log.New(os.Stderr, "", log.LstdFlags)
-	}
-	return func(msg string) {
-		logger.Print(msg)
-	}
-}
-
-// TestLogger is a Logger implementation can be used in test codes.
-//
-// It fails the test when being called.
-func TestLogger(tb testing.TB) Logger {
-	return func(msg string) {
-		tb.Errorf("logger called with msg: %q", msg)
-	}
-}
-
-func fallbackLogger(logger Logger) Logger {
-	if logger == nil {
-		return StdLogger(nil)
-	}
-	return logger
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go
deleted file mode 100644
index 5936d273037..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/memory_buffer.go
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"bytes"
-	"context"
-)
-
-// Memory buffer-based implementation of the TTransport interface.
-type TMemoryBuffer struct {
-	*bytes.Buffer
-	size int
-}
-
-type TMemoryBufferTransportFactory struct {
-	size int
-}
-
-func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
-	if trans != nil {
-		t, ok := trans.(*TMemoryBuffer)
-		if ok && t.size > 0 {
-			return NewTMemoryBufferLen(t.size), nil
-		}
-	}
-	return NewTMemoryBufferLen(p.size), nil
-}
-
-func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
-	return &TMemoryBufferTransportFactory{size: size}
-}
-
-func NewTMemoryBuffer() *TMemoryBuffer {
-	return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0}
-}
-
-func NewTMemoryBufferLen(size int) *TMemoryBuffer {
-	buf := make([]byte, 0, size)
-	return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size}
-}
-
-func (p *TMemoryBuffer) IsOpen() bool {
-	return true
-}
-
-func (p *TMemoryBuffer) Open() error {
-	return nil
-}
-
-func (p *TMemoryBuffer) Close() error {
-	p.Buffer.Reset()
-	return nil
-}
-
-// Flushing a memory buffer is a no-op
-func (p *TMemoryBuffer) Flush(ctx context.Context) error {
-	return nil
-}
-
-func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) {
-	return uint64(p.Buffer.Len())
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go
deleted file mode 100644
index 25ab2e98a25..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/messagetype.go
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-// Message type constants in the Thrift protocol.
-type TMessageType int32
-
-const (
-	INVALID_TMESSAGE_TYPE TMessageType = 0
-	CALL                  TMessageType = 1
-	REPLY                 TMessageType = 2
-	EXCEPTION             TMessageType = 3
-	ONEWAY                TMessageType = 4
-)
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go
deleted file mode 100644
index 8a788df02be..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/middleware.go
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import "context"
-
-// ProcessorMiddleware is a function that can be passed to WrapProcessor to wrap the
-// TProcessorFunctions for that TProcessor.
-//
-// Middlewares are passed in the name of the function as set in the processor
-// map of the TProcessor.
-type ProcessorMiddleware func(name string, next TProcessorFunction) TProcessorFunction
-
-// WrapProcessor takes an existing TProcessor and wraps each of its inner
-// TProcessorFunctions with the middlewares passed in and returns it.
-//
-// Middlewares will be called in the order that they are defined:
-//
-//		1. Middlewares[0]
-//		2. Middlewares[1]
-//		...
-//		N. Middlewares[n]
-func WrapProcessor(processor TProcessor, middlewares ...ProcessorMiddleware) TProcessor {
-	for name, processorFunc := range processor.ProcessorMap() {
-		wrapped := processorFunc
-		// Add middlewares in reverse so the first in the list is the outermost.
-		for i := len(middlewares) - 1; i >= 0; i-- {
-			wrapped = middlewares[i](name, wrapped)
-		}
-		processor.AddToProcessorMap(name, wrapped)
-	}
-	return processor
-}
-
-// WrappedTProcessorFunction is a convenience struct that implements the
-// TProcessorFunction interface that can be used when implementing custom
-// Middleware.
-type WrappedTProcessorFunction struct {
-	// Wrapped is called by WrappedTProcessorFunction.Process and should be a
-	// "wrapped" call to a base TProcessorFunc.Process call.
-	Wrapped func(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
-}
-
-// Process implements the TProcessorFunction interface using p.Wrapped.
-func (p WrappedTProcessorFunction) Process(ctx context.Context, seqID int32, in, out TProtocol) (bool, TException) {
-	return p.Wrapped(ctx, seqID, in, out)
-}
-
-// verify that WrappedTProcessorFunction implements TProcessorFunction
-var (
-	_ TProcessorFunction = WrappedTProcessorFunction{}
-	_ TProcessorFunction = (*WrappedTProcessorFunction)(nil)
-)
-
-// ClientMiddleware can be passed to WrapClient in order to wrap TClient calls
-// with custom middleware.
-type ClientMiddleware func(TClient) TClient
-
-// WrappedTClient is a convenience struct that implements the TClient interface
-// using inner Wrapped function.
-//
-// This is provided to aid in developing ClientMiddleware.
-type WrappedTClient struct {
-	Wrapped func(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error)
-}
-
-// Call implements the TClient interface by calling and returning c.Wrapped.
-func (c WrappedTClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) {
-	return c.Wrapped(ctx, method, args, result)
-}
-
-// verify that WrappedTClient implements TClient
-var (
-	_ TClient = WrappedTClient{}
-	_ TClient = (*WrappedTClient)(nil)
-)
-
-// WrapClient wraps the given TClient in the given middlewares.
-//
-// Middlewares will be called in the order that they are defined:
-//
-//		1. Middlewares[0]
-//		2. Middlewares[1]
-//		...
-//		N. Middlewares[n]
-func WrapClient(client TClient, middlewares ...ClientMiddleware) TClient {
-	// Add middlewares in reverse so the first in the list is the outermost.
-	for i := len(middlewares) - 1; i >= 0; i-- {
-		client = middlewares[i](client)
-	}
-	return client
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go
deleted file mode 100644
index d542b23a998..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/multiplexed_protocol.go
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"context"
-	"fmt"
-	"strings"
-)
-
-/*
-TMultiplexedProtocol is a protocol-independent concrete decorator
-that allows a Thrift client to communicate with a multiplexing Thrift server,
-by prepending the service name to the function name during function calls.
-
-NOTE: THIS IS NOT USED BY SERVERS.  On the server, use TMultiplexedProcessor to handle request
-from a multiplexing client.
-
-This example uses a single socket transport to invoke two services:
-
-socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT)
-transport := thrift.NewTFramedTransport(socket)
-protocol := thrift.NewTBinaryProtocolTransport(transport)
-
-mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator")
-service := Calculator.NewCalculatorClient(mp)
-
-mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport")
-service2 := WeatherReport.NewWeatherReportClient(mp2)
-
-err := transport.Open()
-if err != nil {
-	t.Fatal("Unable to open client socket", err)
-}
-
-fmt.Println(service.Add(2,2))
-fmt.Println(service2.GetTemperature())
-*/
-
-type TMultiplexedProtocol struct {
-	TProtocol
-	serviceName string
-}
-
-const MULTIPLEXED_SEPARATOR = ":"
-
-func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol {
-	return &TMultiplexedProtocol{
-		TProtocol:   protocol,
-		serviceName: serviceName,
-	}
-}
-
-func (t *TMultiplexedProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
-	if typeId == CALL || typeId == ONEWAY {
-		return t.TProtocol.WriteMessageBegin(ctx, t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid)
-	} else {
-		return t.TProtocol.WriteMessageBegin(ctx, name, typeId, seqid)
-	}
-}
-
-/*
-TMultiplexedProcessor is a TProcessor allowing
-a single TServer to provide multiple services.
-
-To do so, you instantiate the processor and then register additional
-processors with it, as shown in the following example:
-
-var processor = thrift.NewTMultiplexedProcessor()
-
-firstProcessor :=
-processor.RegisterProcessor("FirstService", firstProcessor)
-
-processor.registerProcessor(
-  "Calculator",
-  Calculator.NewCalculatorProcessor(&CalculatorHandler{}),
-)
-
-processor.registerProcessor(
-  "WeatherReport",
-  WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}),
-)
-
-serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT)
-if err != nil {
-  t.Fatal("Unable to create server socket", err)
-}
-server := thrift.NewTSimpleServer2(processor, serverTransport)
-server.Serve();
-*/
-
-type TMultiplexedProcessor struct {
-	serviceProcessorMap map[string]TProcessor
-	DefaultProcessor    TProcessor
-}
-
-func NewTMultiplexedProcessor() *TMultiplexedProcessor {
-	return &TMultiplexedProcessor{
-		serviceProcessorMap: make(map[string]TProcessor),
-	}
-}
-
-// ProcessorMap returns a mapping of "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}"
-// to TProcessorFunction for any registered processors.  If there is also a
-// DefaultProcessor, the keys for the methods on that processor will simply be
-// "{FunctionName}".  If the TMultiplexedProcessor has both a DefaultProcessor and
-// other registered processors, then the keys will be a mix of both formats.
-//
-// The implementation differs with other TProcessors in that the map returned is
-// a new map, while most TProcessors just return their internal mapping directly.
-// This means that edits to the map returned by this implementation of ProcessorMap
-// will not affect the underlying mapping within the TMultiplexedProcessor.
-func (t *TMultiplexedProcessor) ProcessorMap() map[string]TProcessorFunction {
-	processorFuncMap := make(map[string]TProcessorFunction)
-	for name, processor := range t.serviceProcessorMap {
-		for method, processorFunc := range processor.ProcessorMap() {
-			processorFuncName := name + MULTIPLEXED_SEPARATOR + method
-			processorFuncMap[processorFuncName] = processorFunc
-		}
-	}
-	if t.DefaultProcessor != nil {
-		for method, processorFunc := range t.DefaultProcessor.ProcessorMap() {
-			processorFuncMap[method] = processorFunc
-		}
-	}
-	return processorFuncMap
-}
-
-// AddToProcessorMap updates the underlying TProcessor ProccessorMaps depending on
-// the format of "name".
-//
-// If "name" is in the format "{ProcessorName}{MULTIPLEXED_SEPARATOR}{FunctionName}",
-// then it sets the given TProcessorFunction on the inner TProcessor with the
-// ProcessorName component using the FunctionName component.
-//
-// If "name" is just in the format "{FunctionName}", that is to say there is no
-// MULTIPLEXED_SEPARATOR, and the TMultiplexedProcessor has a DefaultProcessor
-// configured, then it will set the given TProcessorFunction on the DefaultProcessor
-// using the given name.
-//
-// If there is not a TProcessor available for the given name, then this function
-// does nothing.  This can happen when there is no TProcessor registered for
-// the given ProcessorName or if all that is given is the FunctionName and there
-// is no DefaultProcessor set.
-func (t *TMultiplexedProcessor) AddToProcessorMap(name string, processorFunc TProcessorFunction) {
-	processorName, funcName, found := strings.Cut(name, MULTIPLEXED_SEPARATOR)
-	if !found {
-		if t.DefaultProcessor != nil {
-			t.DefaultProcessor.AddToProcessorMap(processorName, processorFunc)
-		}
-		return
-	}
-	if processor, ok := t.serviceProcessorMap[processorName]; ok {
-		processor.AddToProcessorMap(funcName, processorFunc)
-	}
-
-}
-
-// verify that TMultiplexedProcessor implements TProcessor
-var _ TProcessor = (*TMultiplexedProcessor)(nil)
-
-func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) {
-	t.DefaultProcessor = processor
-}
-
-func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) {
-	if t.serviceProcessorMap == nil {
-		t.serviceProcessorMap = make(map[string]TProcessor)
-	}
-	t.serviceProcessorMap[name] = processor
-}
-
-func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
-	name, typeId, seqid, err := in.ReadMessageBegin(ctx)
-	if err != nil {
-		return false, NewTProtocolException(err)
-	}
-	if typeId != CALL && typeId != ONEWAY {
-		return false, NewTProtocolException(fmt.Errorf("Unexpected message type %v", typeId))
-	}
-	// extract the service name
-	processorName, funcName, found := strings.Cut(name, MULTIPLEXED_SEPARATOR)
-	if !found {
-		if t.DefaultProcessor != nil {
-			smb := NewStoredMessageProtocol(in, name, typeId, seqid)
-			return t.DefaultProcessor.Process(ctx, smb, out)
-		}
-		return false, NewTProtocolException(fmt.Errorf(
-			"Service name not found in message name: %s.  Did you forget to use a TMultiplexProtocol in your client?",
-			name,
-		))
-	}
-	actualProcessor, ok := t.serviceProcessorMap[processorName]
-	if !ok {
-		return false, NewTProtocolException(fmt.Errorf(
-			"Service name not found: %s.  Did you forget to call registerProcessor()?",
-			processorName,
-		))
-	}
-	smb := NewStoredMessageProtocol(in, funcName, typeId, seqid)
-	return actualProcessor.Process(ctx, smb, out)
-}
-
-// Protocol that use stored message for ReadMessageBegin
-type storedMessageProtocol struct {
-	TProtocol
-	name   string
-	typeId TMessageType
-	seqid  int32
-}
-
-func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol {
-	return &storedMessageProtocol{protocol, name, typeId, seqid}
-}
-
-func (s *storedMessageProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error) {
-	return s.name, s.typeId, s.seqid, nil
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go
deleted file mode 100644
index e4512d204c0..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/numeric.go
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"math"
-	"strconv"
-)
-
-type Numeric interface {
-	Int64() int64
-	Int32() int32
-	Int16() int16
-	Byte() byte
-	Int() int
-	Float64() float64
-	Float32() float32
-	String() string
-	isNull() bool
-}
-
-type numeric struct {
-	iValue int64
-	dValue float64
-	sValue string
-	isNil  bool
-}
-
-var (
-	INFINITY          Numeric
-	NEGATIVE_INFINITY Numeric
-	NAN               Numeric
-	ZERO              Numeric
-	NUMERIC_NULL      Numeric
-)
-
-func NewNumericFromDouble(dValue float64) Numeric {
-	if math.IsInf(dValue, 1) {
-		return INFINITY
-	}
-	if math.IsInf(dValue, -1) {
-		return NEGATIVE_INFINITY
-	}
-	if math.IsNaN(dValue) {
-		return NAN
-	}
-	iValue := int64(dValue)
-	sValue := strconv.FormatFloat(dValue, 'g', 10, 64)
-	isNil := false
-	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
-}
-
-func NewNumericFromI64(iValue int64) Numeric {
-	dValue := float64(iValue)
-	sValue := strconv.FormatInt(iValue, 10)
-	isNil := false
-	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
-}
-
-func NewNumericFromI32(iValue int32) Numeric {
-	dValue := float64(iValue)
-	sValue := strconv.FormatInt(int64(iValue), 10)
-	isNil := false
-	return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
-}
-
-func NewNumericFromString(sValue string) Numeric {
-	if sValue == INFINITY.String() {
-		return INFINITY
-	}
-	if sValue == NEGATIVE_INFINITY.String() {
-		return NEGATIVE_INFINITY
-	}
-	if sValue == NAN.String() {
-		return NAN
-	}
-	iValue, _ := strconv.ParseInt(sValue, 10, 64)
-	dValue, _ := strconv.ParseFloat(sValue, 64)
-	isNil := len(sValue) == 0
-	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
-}
-
-func NewNumericFromJSONString(sValue string, isNull bool) Numeric {
-	if isNull {
-		return NewNullNumeric()
-	}
-	if sValue == JSON_INFINITY {
-		return INFINITY
-	}
-	if sValue == JSON_NEGATIVE_INFINITY {
-		return NEGATIVE_INFINITY
-	}
-	if sValue == JSON_NAN {
-		return NAN
-	}
-	iValue, _ := strconv.ParseInt(sValue, 10, 64)
-	dValue, _ := strconv.ParseFloat(sValue, 64)
-	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull}
-}
-
-func NewNullNumeric() Numeric {
-	return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true}
-}
-
-func (p *numeric) Int64() int64 {
-	return p.iValue
-}
-
-func (p *numeric) Int32() int32 {
-	return int32(p.iValue)
-}
-
-func (p *numeric) Int16() int16 {
-	return int16(p.iValue)
-}
-
-func (p *numeric) Byte() byte {
-	return byte(p.iValue)
-}
-
-func (p *numeric) Int() int {
-	return int(p.iValue)
-}
-
-func (p *numeric) Float64() float64 {
-	return p.dValue
-}
-
-func (p *numeric) Float32() float32 {
-	return float32(p.dValue)
-}
-
-func (p *numeric) String() string {
-	return p.sValue
-}
-
-func (p *numeric) isNull() bool {
-	return p.isNil
-}
-
-func init() {
-	INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false}
-	NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false}
-	NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false}
-	ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false}
-	NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true}
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go
deleted file mode 100644
index fb564ea8193..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/pointerize.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-///////////////////////////////////////////////////////////////////////////////
-// This file is home to helpers that convert from various base types to
-// respective pointer types. This is necessary because Go does not permit
-// references to constants, nor can a pointer type to base type be allocated
-// and initialized in a single expression.
-//
-// E.g., this is not allowed:
-//
-//    var ip *int = &5
-//
-// But this *is* allowed:
-//
-//    func IntPtr(i int) *int { return &i }
-//    var ip *int = IntPtr(5)
-//
-// Since pointers to base types are commonplace as [optional] fields in
-// exported thrift structs, we factor such helpers here.
-///////////////////////////////////////////////////////////////////////////////
-
-func Float32Ptr(v float32) *float32 { return &v }
-func Float64Ptr(v float64) *float64 { return &v }
-func IntPtr(v int) *int             { return &v }
-func Int8Ptr(v int8) *int8          { return &v }
-func Int16Ptr(v int16) *int16       { return &v }
-func Int32Ptr(v int32) *int32       { return &v }
-func Int64Ptr(v int64) *int64       { return &v }
-func StringPtr(v string) *string    { return &v }
-func Uint32Ptr(v uint32) *uint32    { return &v }
-func Uint64Ptr(v uint64) *uint64    { return &v }
-func BoolPtr(v bool) *bool          { return &v }
-func ByteSlicePtr(v []byte) *[]byte { return &v }
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go
deleted file mode 100644
index 245a3ccfc98..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/processor_factory.go
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import "context"
-
-// A processor is a generic object which operates upon an input stream and
-// writes to some output stream.
-type TProcessor interface {
-	Process(ctx context.Context, in, out TProtocol) (bool, TException)
-
-	// ProcessorMap returns a map of thrift method names to TProcessorFunctions.
-	ProcessorMap() map[string]TProcessorFunction
-
-	// AddToProcessorMap adds the given TProcessorFunction to the internal
-	// processor map at the given key.
-	//
-	// If one is already set at the given key, it will be replaced with the new
-	// TProcessorFunction.
-	AddToProcessorMap(string, TProcessorFunction)
-}
-
-type TProcessorFunction interface {
-	Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
-}
-
-// The default processor factory just returns a singleton
-// instance.
-type TProcessorFactory interface {
-	GetProcessor(trans TTransport) TProcessor
-}
-
-type tProcessorFactory struct {
-	processor TProcessor
-}
-
-func NewTProcessorFactory(p TProcessor) TProcessorFactory {
-	return &tProcessorFactory{processor: p}
-}
-
-func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor {
-	return p.processor
-}
-
-/**
- * The default processor factory just returns a singleton
- * instance.
- */
-type TProcessorFunctionFactory interface {
-	GetProcessorFunction(trans TTransport) TProcessorFunction
-}
-
-type tProcessorFunctionFactory struct {
-	processor TProcessorFunction
-}
-
-func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory {
-	return &tProcessorFunctionFactory{processor: p}
-}
-
-func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction {
-	return p.processor
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go
deleted file mode 100644
index 0a69bd4162d..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol.go
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"context"
-	"errors"
-	"fmt"
-)
-
-const (
-	VERSION_MASK = 0xffff0000
-	VERSION_1    = 0x80010000
-)
-
-type TProtocol interface {
-	WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error
-	WriteMessageEnd(ctx context.Context) error
-	WriteStructBegin(ctx context.Context, name string) error
-	WriteStructEnd(ctx context.Context) error
-	WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error
-	WriteFieldEnd(ctx context.Context) error
-	WriteFieldStop(ctx context.Context) error
-	WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error
-	WriteMapEnd(ctx context.Context) error
-	WriteListBegin(ctx context.Context, elemType TType, size int) error
-	WriteListEnd(ctx context.Context) error
-	WriteSetBegin(ctx context.Context, elemType TType, size int) error
-	WriteSetEnd(ctx context.Context) error
-	WriteBool(ctx context.Context, value bool) error
-	WriteByte(ctx context.Context, value int8) error
-	WriteI16(ctx context.Context, value int16) error
-	WriteI32(ctx context.Context, value int32) error
-	WriteI64(ctx context.Context, value int64) error
-	WriteDouble(ctx context.Context, value float64) error
-	WriteString(ctx context.Context, value string) error
-	WriteBinary(ctx context.Context, value []byte) error
-
-	ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error)
-	ReadMessageEnd(ctx context.Context) error
-	ReadStructBegin(ctx context.Context) (name string, err error)
-	ReadStructEnd(ctx context.Context) error
-	ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error)
-	ReadFieldEnd(ctx context.Context) error
-	ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error)
-	ReadMapEnd(ctx context.Context) error
-	ReadListBegin(ctx context.Context) (elemType TType, size int, err error)
-	ReadListEnd(ctx context.Context) error
-	ReadSetBegin(ctx context.Context) (elemType TType, size int, err error)
-	ReadSetEnd(ctx context.Context) error
-	ReadBool(ctx context.Context) (value bool, err error)
-	ReadByte(ctx context.Context) (value int8, err error)
-	ReadI16(ctx context.Context) (value int16, err error)
-	ReadI32(ctx context.Context) (value int32, err error)
-	ReadI64(ctx context.Context) (value int64, err error)
-	ReadDouble(ctx context.Context) (value float64, err error)
-	ReadString(ctx context.Context) (value string, err error)
-	ReadBinary(ctx context.Context) (value []byte, err error)
-
-	Skip(ctx context.Context, fieldType TType) (err error)
-	Flush(ctx context.Context) (err error)
-
-	Transport() TTransport
-}
-
-// The maximum recursive depth the skip() function will traverse
-const DEFAULT_RECURSION_DEPTH = 64
-
-// Skips over the next data element from the provided input TProtocol object.
-func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) {
-	return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH)
-}
-
-// Skips over the next data element from the provided input TProtocol object.
-func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) {
-
-	if maxDepth <= 0 {
-		return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
-	}
-
-	switch fieldType {
-	case BOOL:
-		_, err = self.ReadBool(ctx)
-		return
-	case BYTE:
-		_, err = self.ReadByte(ctx)
-		return
-	case I16:
-		_, err = self.ReadI16(ctx)
-		return
-	case I32:
-		_, err = self.ReadI32(ctx)
-		return
-	case I64:
-		_, err = self.ReadI64(ctx)
-		return
-	case DOUBLE:
-		_, err = self.ReadDouble(ctx)
-		return
-	case STRING:
-		_, err = self.ReadString(ctx)
-		return
-	case STRUCT:
-		if _, err = self.ReadStructBegin(ctx); err != nil {
-			return err
-		}
-		for {
-			_, typeId, _, _ := self.ReadFieldBegin(ctx)
-			if typeId == STOP {
-				break
-			}
-			err := Skip(ctx, self, typeId, maxDepth-1)
-			if err != nil {
-				return err
-			}
-			self.ReadFieldEnd(ctx)
-		}
-		return self.ReadStructEnd(ctx)
-	case MAP:
-		keyType, valueType, size, err := self.ReadMapBegin(ctx)
-		if err != nil {
-			return err
-		}
-		for i := 0; i < size; i++ {
-			err := Skip(ctx, self, keyType, maxDepth-1)
-			if err != nil {
-				return err
-			}
-			self.Skip(ctx, valueType)
-		}
-		return self.ReadMapEnd(ctx)
-	case SET:
-		elemType, size, err := self.ReadSetBegin(ctx)
-		if err != nil {
-			return err
-		}
-		for i := 0; i < size; i++ {
-			err := Skip(ctx, self, elemType, maxDepth-1)
-			if err != nil {
-				return err
-			}
-		}
-		return self.ReadSetEnd(ctx)
-	case LIST:
-		elemType, size, err := self.ReadListBegin(ctx)
-		if err != nil {
-			return err
-		}
-		for i := 0; i < size; i++ {
-			err := Skip(ctx, self, elemType, maxDepth-1)
-			if err != nil {
-				return err
-			}
-		}
-		return self.ReadListEnd(ctx)
-	default:
-		return NewTProtocolExceptionWithType(INVALID_DATA, errors.New(fmt.Sprintf("Unknown data type %d", fieldType)))
-	}
-	return nil
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go
deleted file mode 100644
index 9dcf4bfd94c..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_exception.go
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"encoding/base64"
-	"errors"
-)
-
-// Thrift Protocol exception
-type TProtocolException interface {
-	TException
-	TypeId() int
-}
-
-const (
-	UNKNOWN_PROTOCOL_EXCEPTION = 0
-	INVALID_DATA               = 1
-	NEGATIVE_SIZE              = 2
-	SIZE_LIMIT                 = 3
-	BAD_VERSION                = 4
-	NOT_IMPLEMENTED            = 5
-	DEPTH_LIMIT                = 6
-)
-
-type tProtocolException struct {
-	typeId int
-	err    error
-	msg    string
-}
-
-var _ TProtocolException = (*tProtocolException)(nil)
-
-func (tProtocolException) TExceptionType() TExceptionType {
-	return TExceptionTypeProtocol
-}
-
-func (p *tProtocolException) TypeId() int {
-	return p.typeId
-}
-
-func (p *tProtocolException) String() string {
-	return p.msg
-}
-
-func (p *tProtocolException) Error() string {
-	return p.msg
-}
-
-func (p *tProtocolException) Unwrap() error {
-	return p.err
-}
-
-func NewTProtocolException(err error) TProtocolException {
-	if err == nil {
-		return nil
-	}
-
-	if e, ok := err.(TProtocolException); ok {
-		return e
-	}
-
-	if errors.As(err, new(base64.CorruptInputError)) {
-		return NewTProtocolExceptionWithType(INVALID_DATA, err)
-	}
-
-	return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err)
-}
-
-func NewTProtocolExceptionWithType(errType int, err error) TProtocolException {
-	if err == nil {
-		return nil
-	}
-	return &tProtocolException{
-		typeId: errType,
-		err:    err,
-		msg:    err.Error(),
-	}
-}
-
-func prependTProtocolException(prepend string, err TProtocolException) TProtocolException {
-	return &tProtocolException{
-		typeId: err.TypeId(),
-		err:    err,
-		msg:    prepend + err.Error(),
-	}
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go
deleted file mode 100644
index c40f796d886..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/protocol_factory.go
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-// Factory interface for constructing protocol instances.
-type TProtocolFactory interface {
-	GetProtocol(trans TTransport) TProtocol
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go
deleted file mode 100644
index d884c6ac6c4..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/response_helper.go
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"context"
-)
-
-// See https://godoc.org/context#WithValue on why do we need the unexported typedefs.
-type responseHelperKey struct{}
-
-// TResponseHelper defines a object with a set of helper functions that can be
-// retrieved from the context object passed into server handler functions.
-//
-// Use GetResponseHelper to retrieve the injected TResponseHelper implementation
-// from the context object.
-//
-// The zero value of TResponseHelper is valid with all helper functions being
-// no-op.
-type TResponseHelper struct {
-	// THeader related functions
-	*THeaderResponseHelper
-}
-
-// THeaderResponseHelper defines THeader related TResponseHelper functions.
-//
-// The zero value of *THeaderResponseHelper is valid with all helper functions
-// being no-op.
-type THeaderResponseHelper struct {
-	proto *THeaderProtocol
-}
-
-// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the
-// underlying TProtocol.
-func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper {
-	if hp, ok := proto.(*THeaderProtocol); ok {
-		return &THeaderResponseHelper{
-			proto: hp,
-		}
-	}
-	return nil
-}
-
-// SetHeader sets a response header.
-//
-// It's no-op if the underlying protocol/transport does not support THeader.
-func (h *THeaderResponseHelper) SetHeader(key, value string) {
-	if h != nil && h.proto != nil {
-		h.proto.SetWriteHeader(key, value)
-	}
-}
-
-// ClearHeaders clears all the response headers previously set.
-//
-// It's no-op if the underlying protocol/transport does not support THeader.
-func (h *THeaderResponseHelper) ClearHeaders() {
-	if h != nil && h.proto != nil {
-		h.proto.ClearWriteHeaders()
-	}
-}
-
-// GetResponseHelper retrieves the TResponseHelper implementation injected into
-// the context object.
-//
-// If no helper was found in the context object, a nop helper with ok == false
-// will be returned.
-func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) {
-	if v := ctx.Value(responseHelperKey{}); v != nil {
-		helper, ok = v.(TResponseHelper)
-	}
-	return
-}
-
-// SetResponseHelper injects TResponseHelper into the context object.
-func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context {
-	return context.WithValue(ctx, responseHelperKey{}, helper)
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go
deleted file mode 100644
index 83fdf29f5cb..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/rich_transport.go
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"errors"
-	"io"
-)
-
-type RichTransport struct {
-	TTransport
-}
-
-// Wraps Transport to provide TRichTransport interface
-func NewTRichTransport(trans TTransport) *RichTransport {
-	return &RichTransport{trans}
-}
-
-func (r *RichTransport) ReadByte() (c byte, err error) {
-	return readByte(r.TTransport)
-}
-
-func (r *RichTransport) WriteByte(c byte) error {
-	return writeByte(r.TTransport, c)
-}
-
-func (r *RichTransport) WriteString(s string) (n int, err error) {
-	return r.Write([]byte(s))
-}
-
-func (r *RichTransport) RemainingBytes() (num_bytes uint64) {
-	return r.TTransport.RemainingBytes()
-}
-
-func readByte(r io.Reader) (c byte, err error) {
-	v := [1]byte{0}
-	n, err := r.Read(v[0:1])
-	if n > 0 && (err == nil || errors.Is(err, io.EOF)) {
-		return v[0], nil
-	}
-	if n > 0 && err != nil {
-		return v[0], err
-	}
-	if err != nil {
-		return 0, err
-	}
-	return v[0], nil
-}
-
-func writeByte(w io.Writer, c byte) error {
-	v := [1]byte{c}
-	_, err := w.Write(v[0:1])
-	return err
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go
deleted file mode 100644
index c44979094c6..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/serializer.go
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"context"
-	"sync"
-)
-
-type TSerializer struct {
-	Transport *TMemoryBuffer
-	Protocol  TProtocol
-}
-
-type TStruct interface {
-	Write(ctx context.Context, p TProtocol) error
-	Read(ctx context.Context, p TProtocol) error
-}
-
-func NewTSerializer() *TSerializer {
-	transport := NewTMemoryBufferLen(1024)
-	protocol := NewTBinaryProtocolTransport(transport)
-
-	return &TSerializer{
-		Transport: transport,
-		Protocol:  protocol,
-	}
-}
-
-func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) {
-	t.Transport.Reset()
-
-	if err = msg.Write(ctx, t.Protocol); err != nil {
-		return
-	}
-
-	if err = t.Protocol.Flush(ctx); err != nil {
-		return
-	}
-	if err = t.Transport.Flush(ctx); err != nil {
-		return
-	}
-
-	return t.Transport.String(), nil
-}
-
-func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) {
-	t.Transport.Reset()
-
-	if err = msg.Write(ctx, t.Protocol); err != nil {
-		return
-	}
-
-	if err = t.Protocol.Flush(ctx); err != nil {
-		return
-	}
-
-	if err = t.Transport.Flush(ctx); err != nil {
-		return
-	}
-
-	b = append(b, t.Transport.Bytes()...)
-	return
-}
-
-// TSerializerPool is the thread-safe version of TSerializer, it uses resource
-// pool of TSerializer under the hood.
-//
-// It must be initialized with either NewTSerializerPool or
-// NewTSerializerPoolSizeFactory.
-type TSerializerPool struct {
-	pool sync.Pool
-}
-
-// NewTSerializerPool creates a new TSerializerPool.
-//
-// NewTSerializer can be used as the arg here.
-func NewTSerializerPool(f func() *TSerializer) *TSerializerPool {
-	return &TSerializerPool{
-		pool: sync.Pool{
-			New: func() interface{} {
-				return f()
-			},
-		},
-	}
-}
-
-// NewTSerializerPoolSizeFactory creates a new TSerializerPool with the given
-// size and protocol factory.
-//
-// Note that the size is not the limit. The TMemoryBuffer underneath can grow
-// larger than that. It just dictates the initial size.
-func NewTSerializerPoolSizeFactory(size int, factory TProtocolFactory) *TSerializerPool {
-	return &TSerializerPool{
-		pool: sync.Pool{
-			New: func() interface{} {
-				transport := NewTMemoryBufferLen(size)
-				protocol := factory.GetProtocol(transport)
-
-				return &TSerializer{
-					Transport: transport,
-					Protocol:  protocol,
-				}
-			},
-		},
-	}
-}
-
-func (t *TSerializerPool) WriteString(ctx context.Context, msg TStruct) (string, error) {
-	s := t.pool.Get().(*TSerializer)
-	defer t.pool.Put(s)
-	return s.WriteString(ctx, msg)
-}
-
-func (t *TSerializerPool) Write(ctx context.Context, msg TStruct) ([]byte, error) {
-	s := t.pool.Get().(*TSerializer)
-	defer t.pool.Put(s)
-	return s.Write(ctx, msg)
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go
deleted file mode 100644
index f813fa3532c..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server.go
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-type TServer interface {
-	ProcessorFactory() TProcessorFactory
-	ServerTransport() TServerTransport
-	InputTransportFactory() TTransportFactory
-	OutputTransportFactory() TTransportFactory
-	InputProtocolFactory() TProtocolFactory
-	OutputProtocolFactory() TProtocolFactory
-
-	// Starts the server
-	Serve() error
-	// Stops the server. This is optional on a per-implementation basis. Not
-	// all servers are required to be cleanly stoppable.
-	Stop() error
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go
deleted file mode 100644
index 7dd24ae3648..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_socket.go
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"net"
-	"sync"
-	"time"
-)
-
-type TServerSocket struct {
-	listener      net.Listener
-	addr          net.Addr
-	clientTimeout time.Duration
-
-	// Protects the interrupted value to make it thread safe.
-	mu          sync.RWMutex
-	interrupted bool
-}
-
-func NewTServerSocket(listenAddr string) (*TServerSocket, error) {
-	return NewTServerSocketTimeout(listenAddr, 0)
-}
-
-func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) {
-	addr, err := net.ResolveTCPAddr("tcp", listenAddr)
-	if err != nil {
-		return nil, err
-	}
-	return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil
-}
-
-// Creates a TServerSocket from a net.Addr
-func NewTServerSocketFromAddrTimeout(addr net.Addr, clientTimeout time.Duration) *TServerSocket {
-	return &TServerSocket{addr: addr, clientTimeout: clientTimeout}
-}
-
-func (p *TServerSocket) Listen() error {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-	if p.IsListening() {
-		return nil
-	}
-	l, err := net.Listen(p.addr.Network(), p.addr.String())
-	if err != nil {
-		return err
-	}
-	p.listener = l
-	return nil
-}
-
-func (p *TServerSocket) Accept() (TTransport, error) {
-	p.mu.RLock()
-	interrupted := p.interrupted
-	p.mu.RUnlock()
-
-	if interrupted {
-		return nil, errTransportInterrupted
-	}
-
-	p.mu.Lock()
-	listener := p.listener
-	p.mu.Unlock()
-	if listener == nil {
-		return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
-	}
-
-	conn, err := listener.Accept()
-	if err != nil {
-		return nil, NewTTransportExceptionFromError(err)
-	}
-	return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil
-}
-
-// Checks whether the socket is listening.
-func (p *TServerSocket) IsListening() bool {
-	return p.listener != nil
-}
-
-// Connects the socket, creating a new socket object if necessary.
-func (p *TServerSocket) Open() error {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-	if p.IsListening() {
-		return NewTTransportException(ALREADY_OPEN, "Server socket already open")
-	}
-	if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil {
-		return err
-	} else {
-		p.listener = l
-	}
-	return nil
-}
-
-func (p *TServerSocket) Addr() net.Addr {
-	if p.listener != nil {
-		return p.listener.Addr()
-	}
-	return p.addr
-}
-
-func (p *TServerSocket) Close() error {
-	var err error
-	p.mu.Lock()
-	if p.IsListening() {
-		err = p.listener.Close()
-		p.listener = nil
-	}
-	p.mu.Unlock()
-	return err
-}
-
-func (p *TServerSocket) Interrupt() error {
-	p.mu.Lock()
-	p.interrupted = true
-	p.mu.Unlock()
-	p.Close()
-
-	return nil
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go
deleted file mode 100644
index 51c40b64a1d..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/server_transport.go
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-// Server transport. Object which provides client transports.
-type TServerTransport interface {
-	Listen() error
-	Accept() (TTransport, error)
-	Close() error
-
-	// Optional method implementation. This signals to the server transport
-	// that it should break out of any accept() or listen() that it is currently
-	// blocked on. This method, if implemented, MUST be thread safe, as it may
-	// be called from a different thread context than the other TServerTransport
-	// methods.
-	Interrupt() error
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go
deleted file mode 100644
index d1a8154532d..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_json_protocol.go
+++ /dev/null
@@ -1,1373 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"bufio"
-	"bytes"
-	"context"
-	"encoding/base64"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"math"
-	"strconv"
-)
-
-type _ParseContext int
-
-const (
-	_CONTEXT_INVALID              _ParseContext = iota
-	_CONTEXT_IN_TOPLEVEL                        // 1
-	_CONTEXT_IN_LIST_FIRST                      // 2
-	_CONTEXT_IN_LIST                            // 3
-	_CONTEXT_IN_OBJECT_FIRST                    // 4
-	_CONTEXT_IN_OBJECT_NEXT_KEY                 // 5
-	_CONTEXT_IN_OBJECT_NEXT_VALUE               // 6
-)
-
-func (p _ParseContext) String() string {
-	switch p {
-	case _CONTEXT_IN_TOPLEVEL:
-		return "TOPLEVEL"
-	case _CONTEXT_IN_LIST_FIRST:
-		return "LIST-FIRST"
-	case _CONTEXT_IN_LIST:
-		return "LIST"
-	case _CONTEXT_IN_OBJECT_FIRST:
-		return "OBJECT-FIRST"
-	case _CONTEXT_IN_OBJECT_NEXT_KEY:
-		return "OBJECT-NEXT-KEY"
-	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
-		return "OBJECT-NEXT-VALUE"
-	}
-	return "UNKNOWN-PARSE-CONTEXT"
-}
-
-type jsonContextStack []_ParseContext
-
-func (s *jsonContextStack) push(v _ParseContext) {
-	*s = append(*s, v)
-}
-
-func (s jsonContextStack) peek() (v _ParseContext, ok bool) {
-	l := len(s)
-	if l <= 0 {
-		return
-	}
-	return s[l-1], true
-}
-
-func (s *jsonContextStack) pop() (v _ParseContext, ok bool) {
-	l := len(*s)
-	if l <= 0 {
-		return
-	}
-	v = (*s)[l-1]
-	*s = (*s)[0 : l-1]
-	return v, true
-}
-
-var errEmptyJSONContextStack = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Unexpected empty json protocol context stack"))
-
-// Simple JSON protocol implementation for thrift.
-//
-// This protocol produces/consumes a simple output format
-// suitable for parsing by scripting languages.  It should not be
-// confused with the full-featured TJSONProtocol.
-//
-type TSimpleJSONProtocol struct {
-	trans TTransport
-
-	parseContextStack jsonContextStack
-	dumpContext       jsonContextStack
-
-	writer *bufio.Writer
-	reader *bufio.Reader
-}
-
-// Constructor
-func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol {
-	v := &TSimpleJSONProtocol{trans: t,
-		writer: bufio.NewWriter(t),
-		reader: bufio.NewReader(t),
-	}
-	v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL)
-	v.dumpContext.push(_CONTEXT_IN_TOPLEVEL)
-	return v
-}
-
-// Factory
-type TSimpleJSONProtocolFactory struct{}
-
-func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
-	return NewTSimpleJSONProtocol(trans)
-}
-
-func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory {
-	return &TSimpleJSONProtocolFactory{}
-}
-
-var (
-	JSON_COMMA                   []byte
-	JSON_COLON                   []byte
-	JSON_LBRACE                  []byte
-	JSON_RBRACE                  []byte
-	JSON_LBRACKET                []byte
-	JSON_RBRACKET                []byte
-	JSON_QUOTE                   byte
-	JSON_QUOTE_BYTES             []byte
-	JSON_NULL                    []byte
-	JSON_TRUE                    []byte
-	JSON_FALSE                   []byte
-	JSON_INFINITY                string
-	JSON_NEGATIVE_INFINITY       string
-	JSON_NAN                     string
-	JSON_INFINITY_BYTES          []byte
-	JSON_NEGATIVE_INFINITY_BYTES []byte
-	JSON_NAN_BYTES               []byte
-	json_nonbase_map_elem_bytes  []byte
-)
-
-func init() {
-	JSON_COMMA = []byte{','}
-	JSON_COLON = []byte{':'}
-	JSON_LBRACE = []byte{'{'}
-	JSON_RBRACE = []byte{'}'}
-	JSON_LBRACKET = []byte{'['}
-	JSON_RBRACKET = []byte{']'}
-	JSON_QUOTE = '"'
-	JSON_QUOTE_BYTES = []byte{'"'}
-	JSON_NULL = []byte{'n', 'u', 'l', 'l'}
-	JSON_TRUE = []byte{'t', 'r', 'u', 'e'}
-	JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'}
-	JSON_INFINITY = "Infinity"
-	JSON_NEGATIVE_INFINITY = "-Infinity"
-	JSON_NAN = "NaN"
-	JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
-	JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
-	JSON_NAN_BYTES = []byte{'N', 'a', 'N'}
-	json_nonbase_map_elem_bytes = []byte{']', ',', '['}
-}
-
-func jsonQuote(s string) string {
-	b, _ := json.Marshal(s)
-	s1 := string(b)
-	return s1
-}
-
-func jsonUnquote(s string) (string, bool) {
-	s1 := new(string)
-	err := json.Unmarshal([]byte(s), s1)
-	return *s1, err == nil
-}
-
-func mismatch(expected, actual string) error {
-	return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual)
-}
-
-func (p *TSimpleJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
-	p.resetContextStack() // THRIFT-3735
-	if e := p.OutputListBegin(); e != nil {
-		return e
-	}
-	if e := p.WriteString(ctx, name); e != nil {
-		return e
-	}
-	if e := p.WriteByte(ctx, int8(typeId)); e != nil {
-		return e
-	}
-	if e := p.WriteI32(ctx, seqId); e != nil {
-		return e
-	}
-	return nil
-}
-
-func (p *TSimpleJSONProtocol) WriteMessageEnd(ctx context.Context) error {
-	return p.OutputListEnd()
-}
-
-func (p *TSimpleJSONProtocol) WriteStructBegin(ctx context.Context, name string) error {
-	if e := p.OutputObjectBegin(); e != nil {
-		return e
-	}
-	return nil
-}
-
-func (p *TSimpleJSONProtocol) WriteStructEnd(ctx context.Context) error {
-	return p.OutputObjectEnd()
-}
-
-func (p *TSimpleJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
-	if e := p.WriteString(ctx, name); e != nil {
-		return e
-	}
-	return nil
-}
-
-func (p *TSimpleJSONProtocol) WriteFieldEnd(ctx context.Context) error {
-	return nil
-}
-
-func (p *TSimpleJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil }
-
-func (p *TSimpleJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
-	if e := p.OutputListBegin(); e != nil {
-		return e
-	}
-	if e := p.WriteByte(ctx, int8(keyType)); e != nil {
-		return e
-	}
-	if e := p.WriteByte(ctx, int8(valueType)); e != nil {
-		return e
-	}
-	return p.WriteI32(ctx, int32(size))
-}
-
-func (p *TSimpleJSONProtocol) WriteMapEnd(ctx context.Context) error {
-	return p.OutputListEnd()
-}
-
-func (p *TSimpleJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
-	return p.OutputElemListBegin(elemType, size)
-}
-
-func (p *TSimpleJSONProtocol) WriteListEnd(ctx context.Context) error {
-	return p.OutputListEnd()
-}
-
-func (p *TSimpleJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
-	return p.OutputElemListBegin(elemType, size)
-}
-
-func (p *TSimpleJSONProtocol) WriteSetEnd(ctx context.Context) error {
-	return p.OutputListEnd()
-}
-
-func (p *TSimpleJSONProtocol) WriteBool(ctx context.Context, b bool) error {
-	return p.OutputBool(b)
-}
-
-func (p *TSimpleJSONProtocol) WriteByte(ctx context.Context, b int8) error {
-	return p.WriteI32(ctx, int32(b))
-}
-
-func (p *TSimpleJSONProtocol) WriteI16(ctx context.Context, v int16) error {
-	return p.WriteI32(ctx, int32(v))
-}
-
-func (p *TSimpleJSONProtocol) WriteI32(ctx context.Context, v int32) error {
-	return p.OutputI64(int64(v))
-}
-
-func (p *TSimpleJSONProtocol) WriteI64(ctx context.Context, v int64) error {
-	return p.OutputI64(int64(v))
-}
-
-func (p *TSimpleJSONProtocol) WriteDouble(ctx context.Context, v float64) error {
-	return p.OutputF64(v)
-}
-
-func (p *TSimpleJSONProtocol) WriteString(ctx context.Context, v string) error {
-	return p.OutputString(v)
-}
-
-func (p *TSimpleJSONProtocol) WriteBinary(ctx context.Context, v []byte) error {
-	// JSON library only takes in a string,
-	// not an arbitrary byte array, to ensure bytes are transmitted
-	// efficiently we must convert this into a valid JSON string
-	// therefore we use base64 encoding to avoid excessive escaping/quoting
-	if e := p.OutputPreValue(); e != nil {
-		return e
-	}
-	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
-		return NewTProtocolException(e)
-	}
-	writer := base64.NewEncoder(base64.StdEncoding, p.writer)
-	if _, e := writer.Write(v); e != nil {
-		p.writer.Reset(p.trans) // THRIFT-3735
-		return NewTProtocolException(e)
-	}
-	if e := writer.Close(); e != nil {
-		return NewTProtocolException(e)
-	}
-	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
-		return NewTProtocolException(e)
-	}
-	return p.OutputPostValue()
-}
-
-// Reading methods.
-func (p *TSimpleJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
-	p.resetContextStack() // THRIFT-3735
-	if isNull, err := p.ParseListBegin(); isNull || err != nil {
-		return name, typeId, seqId, err
-	}
-	if name, err = p.ReadString(ctx); err != nil {
-		return name, typeId, seqId, err
-	}
-	bTypeId, err := p.ReadByte(ctx)
-	typeId = TMessageType(bTypeId)
-	if err != nil {
-		return name, typeId, seqId, err
-	}
-	if seqId, err = p.ReadI32(ctx); err != nil {
-		return name, typeId, seqId, err
-	}
-	return name, typeId, seqId, nil
-}
-
-func (p *TSimpleJSONProtocol) ReadMessageEnd(ctx context.Context) error {
-	return p.ParseListEnd()
-}
-
-func (p *TSimpleJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
-	_, err = p.ParseObjectStart()
-	return "", err
-}
-
-func (p *TSimpleJSONProtocol) ReadStructEnd(ctx context.Context) error {
-	return p.ParseObjectEnd()
-}
-
-func (p *TSimpleJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) {
-	if err := p.ParsePreValue(); err != nil {
-		return "", STOP, 0, err
-	}
-	b, _ := p.reader.Peek(1)
-	if len(b) > 0 {
-		switch b[0] {
-		case JSON_RBRACE[0]:
-			return "", STOP, 0, nil
-		case JSON_QUOTE:
-			p.reader.ReadByte()
-			name, err := p.ParseStringBody()
-			// simplejson is not meant to be read back into thrift
-			// - see http://wiki.apache.org/thrift/ThriftUsageJava
-			// - use JSON instead
-			if err != nil {
-				return name, STOP, 0, err
-			}
-			return name, STOP, -1, p.ParsePostValue()
-		}
-		e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b))
-		return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e)
-	}
-	return "", STOP, 0, NewTProtocolException(io.EOF)
-}
-
-func (p *TSimpleJSONProtocol) ReadFieldEnd(ctx context.Context) error {
-	return nil
-}
-
-func (p *TSimpleJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) {
-	if isNull, e := p.ParseListBegin(); isNull || e != nil {
-		return VOID, VOID, 0, e
-	}
-
-	// read keyType
-	bKeyType, e := p.ReadByte(ctx)
-	keyType = TType(bKeyType)
-	if e != nil {
-		return keyType, valueType, size, e
-	}
-
-	// read valueType
-	bValueType, e := p.ReadByte(ctx)
-	valueType = TType(bValueType)
-	if e != nil {
-		return keyType, valueType, size, e
-	}
-
-	// read size
-	iSize, err := p.ReadI64(ctx)
-	size = int(iSize)
-	return keyType, valueType, size, err
-}
-
-func (p *TSimpleJSONProtocol) ReadMapEnd(ctx context.Context) error {
-	return p.ParseListEnd()
-}
-
-func (p *TSimpleJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) {
-	return p.ParseElemListBegin()
-}
-
-func (p *TSimpleJSONProtocol) ReadListEnd(ctx context.Context) error {
-	return p.ParseListEnd()
-}
-
-func (p *TSimpleJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) {
-	return p.ParseElemListBegin()
-}
-
-func (p *TSimpleJSONProtocol) ReadSetEnd(ctx context.Context) error {
-	return p.ParseListEnd()
-}
-
-func (p *TSimpleJSONProtocol) ReadBool(ctx context.Context) (bool, error) {
-	var value bool
-
-	if err := p.ParsePreValue(); err != nil {
-		return value, err
-	}
-	f, _ := p.reader.Peek(1)
-	if len(f) > 0 {
-		switch f[0] {
-		case JSON_TRUE[0]:
-			b := make([]byte, len(JSON_TRUE))
-			_, err := p.reader.Read(b)
-			if err != nil {
-				return false, NewTProtocolException(err)
-			}
-			if string(b) == string(JSON_TRUE) {
-				value = true
-			} else {
-				e := fmt.Errorf("Expected \"true\" but found: %s", string(b))
-				return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
-			}
-			break
-		case JSON_FALSE[0]:
-			b := make([]byte, len(JSON_FALSE))
-			_, err := p.reader.Read(b)
-			if err != nil {
-				return false, NewTProtocolException(err)
-			}
-			if string(b) == string(JSON_FALSE) {
-				value = false
-			} else {
-				e := fmt.Errorf("Expected \"false\" but found: %s", string(b))
-				return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
-			}
-			break
-		case JSON_NULL[0]:
-			b := make([]byte, len(JSON_NULL))
-			_, err := p.reader.Read(b)
-			if err != nil {
-				return false, NewTProtocolException(err)
-			}
-			if string(b) == string(JSON_NULL) {
-				value = false
-			} else {
-				e := fmt.Errorf("Expected \"null\" but found: %s", string(b))
-				return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
-			}
-		default:
-			e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f))
-			return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
-		}
-	}
-	return value, p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) ReadByte(ctx context.Context) (int8, error) {
-	v, err := p.ReadI64(ctx)
-	return int8(v), err
-}
-
-func (p *TSimpleJSONProtocol) ReadI16(ctx context.Context) (int16, error) {
-	v, err := p.ReadI64(ctx)
-	return int16(v), err
-}
-
-func (p *TSimpleJSONProtocol) ReadI32(ctx context.Context) (int32, error) {
-	v, err := p.ReadI64(ctx)
-	return int32(v), err
-}
-
-func (p *TSimpleJSONProtocol) ReadI64(ctx context.Context) (int64, error) {
-	v, _, err := p.ParseI64()
-	return v, err
-}
-
-func (p *TSimpleJSONProtocol) ReadDouble(ctx context.Context) (float64, error) {
-	v, _, err := p.ParseF64()
-	return v, err
-}
-
-func (p *TSimpleJSONProtocol) ReadString(ctx context.Context) (string, error) {
-	var v string
-	if err := p.ParsePreValue(); err != nil {
-		return v, err
-	}
-	f, _ := p.reader.Peek(1)
-	if len(f) > 0 && f[0] == JSON_QUOTE {
-		p.reader.ReadByte()
-		value, err := p.ParseStringBody()
-		v = value
-		if err != nil {
-			return v, err
-		}
-	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
-		b := make([]byte, len(JSON_NULL))
-		_, err := p.reader.Read(b)
-		if err != nil {
-			return v, NewTProtocolException(err)
-		}
-		if string(b) != string(JSON_NULL) {
-			e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
-			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
-		}
-	} else {
-		e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
-		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
-	}
-	return v, p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
-	var v []byte
-	if err := p.ParsePreValue(); err != nil {
-		return nil, err
-	}
-	f, _ := p.reader.Peek(1)
-	if len(f) > 0 && f[0] == JSON_QUOTE {
-		p.reader.ReadByte()
-		value, err := p.ParseBase64EncodedBody()
-		v = value
-		if err != nil {
-			return v, err
-		}
-	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
-		b := make([]byte, len(JSON_NULL))
-		_, err := p.reader.Read(b)
-		if err != nil {
-			return v, NewTProtocolException(err)
-		}
-		if string(b) != string(JSON_NULL) {
-			e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
-			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
-		}
-	} else {
-		e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
-		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
-	}
-
-	return v, p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) {
-	return NewTProtocolException(p.writer.Flush())
-}
-
-func (p *TSimpleJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
-	return SkipDefaultDepth(ctx, p, fieldType)
-}
-
-func (p *TSimpleJSONProtocol) Transport() TTransport {
-	return p.trans
-}
-
-func (p *TSimpleJSONProtocol) OutputPreValue() error {
-	cxt, ok := p.dumpContext.peek()
-	if !ok {
-		return errEmptyJSONContextStack
-	}
-	switch cxt {
-	case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY:
-		if _, e := p.write(JSON_COMMA); e != nil {
-			return NewTProtocolException(e)
-		}
-	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
-		if _, e := p.write(JSON_COLON); e != nil {
-			return NewTProtocolException(e)
-		}
-	}
-	return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputPostValue() error {
-	cxt, ok := p.dumpContext.peek()
-	if !ok {
-		return errEmptyJSONContextStack
-	}
-	switch cxt {
-	case _CONTEXT_IN_LIST_FIRST:
-		p.dumpContext.pop()
-		p.dumpContext.push(_CONTEXT_IN_LIST)
-	case _CONTEXT_IN_OBJECT_FIRST:
-		p.dumpContext.pop()
-		p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
-	case _CONTEXT_IN_OBJECT_NEXT_KEY:
-		p.dumpContext.pop()
-		p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
-	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
-		p.dumpContext.pop()
-		p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_KEY)
-	}
-	return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputBool(value bool) error {
-	if e := p.OutputPreValue(); e != nil {
-		return e
-	}
-	var v string
-	if value {
-		v = string(JSON_TRUE)
-	} else {
-		v = string(JSON_FALSE)
-	}
-	cxt, ok := p.dumpContext.peek()
-	if !ok {
-		return errEmptyJSONContextStack
-	}
-	switch cxt {
-	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
-		v = jsonQuote(v)
-	}
-	if e := p.OutputStringData(v); e != nil {
-		return e
-	}
-	return p.OutputPostValue()
-}
-
-func (p *TSimpleJSONProtocol) OutputNull() error {
-	if e := p.OutputPreValue(); e != nil {
-		return e
-	}
-	if _, e := p.write(JSON_NULL); e != nil {
-		return NewTProtocolException(e)
-	}
-	return p.OutputPostValue()
-}
-
-func (p *TSimpleJSONProtocol) OutputF64(value float64) error {
-	if e := p.OutputPreValue(); e != nil {
-		return e
-	}
-	var v string
-	if math.IsNaN(value) {
-		v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE)
-	} else if math.IsInf(value, 1) {
-		v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE)
-	} else if math.IsInf(value, -1) {
-		v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE)
-	} else {
-		cxt, ok := p.dumpContext.peek()
-		if !ok {
-			return errEmptyJSONContextStack
-		}
-		v = strconv.FormatFloat(value, 'g', -1, 64)
-		switch cxt {
-		case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
-			v = string(JSON_QUOTE) + v + string(JSON_QUOTE)
-		}
-	}
-	if e := p.OutputStringData(v); e != nil {
-		return e
-	}
-	return p.OutputPostValue()
-}
-
-func (p *TSimpleJSONProtocol) OutputI64(value int64) error {
-	if e := p.OutputPreValue(); e != nil {
-		return e
-	}
-	cxt, ok := p.dumpContext.peek()
-	if !ok {
-		return errEmptyJSONContextStack
-	}
-	v := strconv.FormatInt(value, 10)
-	switch cxt {
-	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
-		v = jsonQuote(v)
-	}
-	if e := p.OutputStringData(v); e != nil {
-		return e
-	}
-	return p.OutputPostValue()
-}
-
-func (p *TSimpleJSONProtocol) OutputString(s string) error {
-	if e := p.OutputPreValue(); e != nil {
-		return e
-	}
-	if e := p.OutputStringData(jsonQuote(s)); e != nil {
-		return e
-	}
-	return p.OutputPostValue()
-}
-
-func (p *TSimpleJSONProtocol) OutputStringData(s string) error {
-	_, e := p.write([]byte(s))
-	return NewTProtocolException(e)
-}
-
-func (p *TSimpleJSONProtocol) OutputObjectBegin() error {
-	if e := p.OutputPreValue(); e != nil {
-		return e
-	}
-	if _, e := p.write(JSON_LBRACE); e != nil {
-		return NewTProtocolException(e)
-	}
-	p.dumpContext.push(_CONTEXT_IN_OBJECT_FIRST)
-	return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputObjectEnd() error {
-	if _, e := p.write(JSON_RBRACE); e != nil {
-		return NewTProtocolException(e)
-	}
-	_, ok := p.dumpContext.pop()
-	if !ok {
-		return errEmptyJSONContextStack
-	}
-	if e := p.OutputPostValue(); e != nil {
-		return e
-	}
-	return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputListBegin() error {
-	if e := p.OutputPreValue(); e != nil {
-		return e
-	}
-	if _, e := p.write(JSON_LBRACKET); e != nil {
-		return NewTProtocolException(e)
-	}
-	p.dumpContext.push(_CONTEXT_IN_LIST_FIRST)
-	return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputListEnd() error {
-	if _, e := p.write(JSON_RBRACKET); e != nil {
-		return NewTProtocolException(e)
-	}
-	_, ok := p.dumpContext.pop()
-	if !ok {
-		return errEmptyJSONContextStack
-	}
-	if e := p.OutputPostValue(); e != nil {
-		return e
-	}
-	return nil
-}
-
-func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
-	if e := p.OutputListBegin(); e != nil {
-		return e
-	}
-	if e := p.OutputI64(int64(elemType)); e != nil {
-		return e
-	}
-	if e := p.OutputI64(int64(size)); e != nil {
-		return e
-	}
-	return nil
-}
-
-func (p *TSimpleJSONProtocol) ParsePreValue() error {
-	if e := p.readNonSignificantWhitespace(); e != nil {
-		return NewTProtocolException(e)
-	}
-	cxt, ok := p.parseContextStack.peek()
-	if !ok {
-		return errEmptyJSONContextStack
-	}
-	b, _ := p.reader.Peek(1)
-	switch cxt {
-	case _CONTEXT_IN_LIST:
-		if len(b) > 0 {
-			switch b[0] {
-			case JSON_RBRACKET[0]:
-				return nil
-			case JSON_COMMA[0]:
-				p.reader.ReadByte()
-				if e := p.readNonSignificantWhitespace(); e != nil {
-					return NewTProtocolException(e)
-				}
-				return nil
-			default:
-				e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b))
-				return NewTProtocolExceptionWithType(INVALID_DATA, e)
-			}
-		}
-	case _CONTEXT_IN_OBJECT_NEXT_KEY:
-		if len(b) > 0 {
-			switch b[0] {
-			case JSON_RBRACE[0]:
-				return nil
-			case JSON_COMMA[0]:
-				p.reader.ReadByte()
-				if e := p.readNonSignificantWhitespace(); e != nil {
-					return NewTProtocolException(e)
-				}
-				return nil
-			default:
-				e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b))
-				return NewTProtocolExceptionWithType(INVALID_DATA, e)
-			}
-		}
-	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
-		if len(b) > 0 {
-			switch b[0] {
-			case JSON_COLON[0]:
-				p.reader.ReadByte()
-				if e := p.readNonSignificantWhitespace(); e != nil {
-					return NewTProtocolException(e)
-				}
-				return nil
-			default:
-				e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b))
-				return NewTProtocolExceptionWithType(INVALID_DATA, e)
-			}
-		}
-	}
-	return nil
-}
-
-func (p *TSimpleJSONProtocol) ParsePostValue() error {
-	if e := p.readNonSignificantWhitespace(); e != nil {
-		return NewTProtocolException(e)
-	}
-	cxt, ok := p.parseContextStack.peek()
-	if !ok {
-		return errEmptyJSONContextStack
-	}
-	switch cxt {
-	case _CONTEXT_IN_LIST_FIRST:
-		p.parseContextStack.pop()
-		p.parseContextStack.push(_CONTEXT_IN_LIST)
-	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
-		p.parseContextStack.pop()
-		p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
-	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
-		p.parseContextStack.pop()
-		p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_KEY)
-	}
-	return nil
-}
-
-func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error {
-	for {
-		b, _ := p.reader.Peek(1)
-		if len(b) < 1 {
-			return nil
-		}
-		switch b[0] {
-		case ' ', '\r', '\n', '\t':
-			p.reader.ReadByte()
-			continue
-		default:
-			break
-		}
-		break
-	}
-	return nil
-}
-
-func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) {
-	line, err := p.reader.ReadString(JSON_QUOTE)
-	if err != nil {
-		return "", NewTProtocolException(err)
-	}
-	l := len(line)
-	// count number of escapes to see if we need to keep going
-	i := 1
-	for ; i < l; i++ {
-		if line[l-i-1] != '\\' {
-			break
-		}
-	}
-	if i&0x01 == 1 {
-		v, ok := jsonUnquote(string(JSON_QUOTE) + line)
-		if !ok {
-			return "", NewTProtocolException(err)
-		}
-		return v, nil
-	}
-	s, err := p.ParseQuotedStringBody()
-	if err != nil {
-		return "", NewTProtocolException(err)
-	}
-	str := string(JSON_QUOTE) + line + s
-	v, ok := jsonUnquote(str)
-	if !ok {
-		e := fmt.Errorf("Unable to parse as JSON string %s", str)
-		return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
-	}
-	return v, nil
-}
-
-func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) {
-	line, err := p.reader.ReadString(JSON_QUOTE)
-	if err != nil {
-		return "", NewTProtocolException(err)
-	}
-	l := len(line)
-	// count number of escapes to see if we need to keep going
-	i := 1
-	for ; i < l; i++ {
-		if line[l-i-1] != '\\' {
-			break
-		}
-	}
-	if i&0x01 == 1 {
-		return line, nil
-	}
-	s, err := p.ParseQuotedStringBody()
-	if err != nil {
-		return "", NewTProtocolException(err)
-	}
-	v := line + s
-	return v, nil
-}
-
-func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) {
-	line, err := p.reader.ReadBytes(JSON_QUOTE)
-	if err != nil {
-		return line, NewTProtocolException(err)
-	}
-	line2 := line[0 : len(line)-1]
-	l := len(line2)
-	if (l % 4) != 0 {
-		pad := 4 - (l % 4)
-		fill := [...]byte{'=', '=', '='}
-		line2 = append(line2, fill[:pad]...)
-		l = len(line2)
-	}
-	output := make([]byte, base64.StdEncoding.DecodedLen(l))
-	n, err := base64.StdEncoding.Decode(output, line2)
-	return output[0:n], NewTProtocolException(err)
-}
-
-func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) {
-	if err := p.ParsePreValue(); err != nil {
-		return 0, false, err
-	}
-	var value int64
-	var isnull bool
-	if p.safePeekContains(JSON_NULL) {
-		p.reader.Read(make([]byte, len(JSON_NULL)))
-		isnull = true
-	} else {
-		num, err := p.readNumeric()
-		isnull = (num == nil)
-		if !isnull {
-			value = num.Int64()
-		}
-		if err != nil {
-			return value, isnull, err
-		}
-	}
-	return value, isnull, p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) {
-	if err := p.ParsePreValue(); err != nil {
-		return 0, false, err
-	}
-	var value float64
-	var isnull bool
-	if p.safePeekContains(JSON_NULL) {
-		p.reader.Read(make([]byte, len(JSON_NULL)))
-		isnull = true
-	} else {
-		num, err := p.readNumeric()
-		isnull = (num == nil)
-		if !isnull {
-			value = num.Float64()
-		}
-		if err != nil {
-			return value, isnull, err
-		}
-	}
-	return value, isnull, p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) {
-	if err := p.ParsePreValue(); err != nil {
-		return false, err
-	}
-	var b []byte
-	b, err := p.reader.Peek(1)
-	if err != nil {
-		return false, err
-	}
-	if len(b) > 0 && b[0] == JSON_LBRACE[0] {
-		p.reader.ReadByte()
-		p.parseContextStack.push(_CONTEXT_IN_OBJECT_FIRST)
-		return false, nil
-	} else if p.safePeekContains(JSON_NULL) {
-		return true, nil
-	}
-	e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b))
-	return false, NewTProtocolExceptionWithType(INVALID_DATA, e)
-}
-
-func (p *TSimpleJSONProtocol) ParseObjectEnd() error {
-	if isNull, err := p.readIfNull(); isNull || err != nil {
-		return err
-	}
-	cxt, _ := p.parseContextStack.peek()
-	if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) {
-		e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt)
-		return NewTProtocolExceptionWithType(INVALID_DATA, e)
-	}
-	line, err := p.reader.ReadString(JSON_RBRACE[0])
-	if err != nil {
-		return NewTProtocolException(err)
-	}
-	for _, char := range line {
-		switch char {
-		default:
-			e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line)
-			return NewTProtocolExceptionWithType(INVALID_DATA, e)
-		case ' ', '\n', '\r', '\t', '}':
-			break
-		}
-	}
-	p.parseContextStack.pop()
-	return p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) {
-	if e := p.ParsePreValue(); e != nil {
-		return false, e
-	}
-	var b []byte
-	b, err = p.reader.Peek(1)
-	if err != nil {
-		return false, err
-	}
-	if len(b) >= 1 && b[0] == JSON_LBRACKET[0] {
-		p.parseContextStack.push(_CONTEXT_IN_LIST_FIRST)
-		p.reader.ReadByte()
-		isNull = false
-	} else if p.safePeekContains(JSON_NULL) {
-		isNull = true
-	} else {
-		err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b)
-	}
-	return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err)
-}
-
-func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
-	if isNull, e := p.ParseListBegin(); isNull || e != nil {
-		return VOID, 0, e
-	}
-	bElemType, _, err := p.ParseI64()
-	elemType = TType(bElemType)
-	if err != nil {
-		return elemType, size, err
-	}
-	nSize, _, err2 := p.ParseI64()
-	size = int(nSize)
-	return elemType, size, err2
-}
-
-func (p *TSimpleJSONProtocol) ParseListEnd() error {
-	if isNull, err := p.readIfNull(); isNull || err != nil {
-		return err
-	}
-	cxt, _ := p.parseContextStack.peek()
-	if cxt != _CONTEXT_IN_LIST {
-		e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt)
-		return NewTProtocolExceptionWithType(INVALID_DATA, e)
-	}
-	line, err := p.reader.ReadString(JSON_RBRACKET[0])
-	if err != nil {
-		return NewTProtocolException(err)
-	}
-	for _, char := range line {
-		switch char {
-		default:
-			e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line)
-			return NewTProtocolExceptionWithType(INVALID_DATA, e)
-		case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]):
-			break
-		}
-	}
-	p.parseContextStack.pop()
-	if cxt, ok := p.parseContextStack.peek(); !ok {
-		return errEmptyJSONContextStack
-	} else if cxt == _CONTEXT_IN_TOPLEVEL {
-		return nil
-	}
-	return p.ParsePostValue()
-}
-
-func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) {
-	e := p.readNonSignificantWhitespace()
-	if e != nil {
-		return nil, VOID, NewTProtocolException(e)
-	}
-	b, e := p.reader.Peek(1)
-	if len(b) > 0 {
-		c := b[0]
-		switch c {
-		case JSON_NULL[0]:
-			buf := make([]byte, len(JSON_NULL))
-			_, e := p.reader.Read(buf)
-			if e != nil {
-				return nil, VOID, NewTProtocolException(e)
-			}
-			if string(JSON_NULL) != string(buf) {
-				e = mismatch(string(JSON_NULL), string(buf))
-				return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
-			}
-			return nil, VOID, nil
-		case JSON_QUOTE:
-			p.reader.ReadByte()
-			v, e := p.ParseStringBody()
-			if e != nil {
-				return v, UTF8, NewTProtocolException(e)
-			}
-			if v == JSON_INFINITY {
-				return INFINITY, DOUBLE, nil
-			} else if v == JSON_NEGATIVE_INFINITY {
-				return NEGATIVE_INFINITY, DOUBLE, nil
-			} else if v == JSON_NAN {
-				return NAN, DOUBLE, nil
-			}
-			return v, UTF8, nil
-		case JSON_TRUE[0]:
-			buf := make([]byte, len(JSON_TRUE))
-			_, e := p.reader.Read(buf)
-			if e != nil {
-				return true, BOOL, NewTProtocolException(e)
-			}
-			if string(JSON_TRUE) != string(buf) {
-				e := mismatch(string(JSON_TRUE), string(buf))
-				return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
-			}
-			return true, BOOL, nil
-		case JSON_FALSE[0]:
-			buf := make([]byte, len(JSON_FALSE))
-			_, e := p.reader.Read(buf)
-			if e != nil {
-				return false, BOOL, NewTProtocolException(e)
-			}
-			if string(JSON_FALSE) != string(buf) {
-				e := mismatch(string(JSON_FALSE), string(buf))
-				return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
-			}
-			return false, BOOL, nil
-		case JSON_LBRACKET[0]:
-			_, e := p.reader.ReadByte()
-			return make([]interface{}, 0), LIST, NewTProtocolException(e)
-		case JSON_LBRACE[0]:
-			_, e := p.reader.ReadByte()
-			return make(map[string]interface{}), STRUCT, NewTProtocolException(e)
-		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]:
-			// assume numeric
-			v, e := p.readNumeric()
-			return v, DOUBLE, e
-		default:
-			e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c))
-			return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
-		}
-	}
-	e = fmt.Errorf("Cannot read a single element while parsing JSON.")
-	return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
-
-}
-
-func (p *TSimpleJSONProtocol) readIfNull() (bool, error) {
-	cont := true
-	for cont {
-		b, _ := p.reader.Peek(1)
-		if len(b) < 1 {
-			return false, nil
-		}
-		switch b[0] {
-		default:
-			return false, nil
-		case JSON_NULL[0]:
-			cont = false
-			break
-		case ' ', '\n', '\r', '\t':
-			p.reader.ReadByte()
-			break
-		}
-	}
-	if p.safePeekContains(JSON_NULL) {
-		p.reader.Read(make([]byte, len(JSON_NULL)))
-		return true, nil
-	}
-	return false, nil
-}
-
-func (p *TSimpleJSONProtocol) readQuoteIfNext() {
-	b, _ := p.reader.Peek(1)
-	if len(b) > 0 && b[0] == JSON_QUOTE {
-		p.reader.ReadByte()
-	}
-}
-
-func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) {
-	isNull, err := p.readIfNull()
-	if isNull || err != nil {
-		return NUMERIC_NULL, err
-	}
-	hasDecimalPoint := false
-	nextCanBeSign := true
-	hasE := false
-	MAX_LEN := 40
-	buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN))
-	continueFor := true
-	inQuotes := false
-	for continueFor {
-		c, err := p.reader.ReadByte()
-		if err != nil {
-			if err == io.EOF {
-				break
-			}
-			return NUMERIC_NULL, NewTProtocolException(err)
-		}
-		switch c {
-		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
-			buf.WriteByte(c)
-			nextCanBeSign = false
-		case '.':
-			if hasDecimalPoint {
-				e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String())
-				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
-			}
-			if hasE {
-				e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String())
-				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
-			}
-			buf.WriteByte(c)
-			hasDecimalPoint, nextCanBeSign = true, false
-		case 'e', 'E':
-			if hasE {
-				e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c)
-				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
-			}
-			buf.WriteByte(c)
-			hasE, nextCanBeSign = true, true
-		case '-', '+':
-			if !nextCanBeSign {
-				e := fmt.Errorf("Negative sign within number")
-				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
-			}
-			buf.WriteByte(c)
-			nextCanBeSign = false
-		case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]:
-			p.reader.UnreadByte()
-			continueFor = false
-		case JSON_NAN[0]:
-			if buf.Len() == 0 {
-				buffer := make([]byte, len(JSON_NAN))
-				buffer[0] = c
-				_, e := p.reader.Read(buffer[1:])
-				if e != nil {
-					return NUMERIC_NULL, NewTProtocolException(e)
-				}
-				if JSON_NAN != string(buffer) {
-					e := mismatch(JSON_NAN, string(buffer))
-					return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
-				}
-				if inQuotes {
-					p.readQuoteIfNext()
-				}
-				return NAN, nil
-			} else {
-				e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
-				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
-			}
-		case JSON_INFINITY[0]:
-			if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') {
-				buffer := make([]byte, len(JSON_INFINITY))
-				buffer[0] = c
-				_, e := p.reader.Read(buffer[1:])
-				if e != nil {
-					return NUMERIC_NULL, NewTProtocolException(e)
-				}
-				if JSON_INFINITY != string(buffer) {
-					e := mismatch(JSON_INFINITY, string(buffer))
-					return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
-				}
-				if inQuotes {
-					p.readQuoteIfNext()
-				}
-				return INFINITY, nil
-			} else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] {
-				buffer := make([]byte, len(JSON_NEGATIVE_INFINITY))
-				buffer[0] = JSON_NEGATIVE_INFINITY[0]
-				buffer[1] = c
-				_, e := p.reader.Read(buffer[2:])
-				if e != nil {
-					return NUMERIC_NULL, NewTProtocolException(e)
-				}
-				if JSON_NEGATIVE_INFINITY != string(buffer) {
-					e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer))
-					return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
-				}
-				if inQuotes {
-					p.readQuoteIfNext()
-				}
-				return NEGATIVE_INFINITY, nil
-			} else {
-				e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String())
-				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
-			}
-		case JSON_QUOTE:
-			if !inQuotes {
-				inQuotes = true
-			} else {
-				break
-			}
-		default:
-			e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
-			return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
-		}
-	}
-	if buf.Len() == 0 {
-		e := fmt.Errorf("Unable to parse number from empty string ''")
-		return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
-	}
-	return NewNumericFromJSONString(buf.String(), false), nil
-}
-
-// Safely peeks into the buffer, reading only what is necessary
-func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool {
-	for i := 0; i < len(b); i++ {
-		a, _ := p.reader.Peek(i + 1)
-		if len(a) < (i+1) || a[i] != b[i] {
-			return false
-		}
-	}
-	return true
-}
-
-// Reset the context stack to its initial state.
-func (p *TSimpleJSONProtocol) resetContextStack() {
-	p.parseContextStack = jsonContextStack{_CONTEXT_IN_TOPLEVEL}
-	p.dumpContext = jsonContextStack{_CONTEXT_IN_TOPLEVEL}
-}
-
-func (p *TSimpleJSONProtocol) write(b []byte) (int, error) {
-	n, err := p.writer.Write(b)
-	if err != nil {
-		p.writer.Reset(p.trans) // THRIFT-3735
-	}
-	return n, err
-}
-
-// SetTConfiguration implements TConfigurationSetter for propagation.
-func (p *TSimpleJSONProtocol) SetTConfiguration(conf *TConfiguration) {
-	PropagateTConfiguration(p.trans, conf)
-}
-
-var _ TConfigurationSetter = (*TSimpleJSONProtocol)(nil)
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go
deleted file mode 100644
index 563cbfc694a..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/simple_server.go
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"sync"
-	"sync/atomic"
-	"time"
-)
-
-// ErrAbandonRequest is a special error server handler implementations can
-// return to indicate that the request has been abandoned.
-//
-// TSimpleServer will check for this error, and close the client connection
-// instead of writing the response/error back to the client.
-//
-// It shall only be used when the server handler implementation know that the
-// client already abandoned the request (by checking that the passed in context
-// is already canceled, for example).
-var ErrAbandonRequest = errors.New("request abandoned")
-
-// ServerConnectivityCheckInterval defines the ticker interval used by
-// connectivity check in thrift compiled TProcessorFunc implementations.
-//
-// It's defined as a variable instead of constant, so that thrift server
-// implementations can change its value to control the behavior.
-//
-// If it's changed to <=0, the feature will be disabled.
-var ServerConnectivityCheckInterval = time.Millisecond * 5
-
-/*
- * This is not a typical TSimpleServer as it is not blocked after accept a socket.
- * It is more like a TThreadedServer that can handle different connections in different goroutines.
- * This will work if golang user implements a conn-pool like thing in client side.
- */
-type TSimpleServer struct {
-	closed int32
-	wg     sync.WaitGroup
-	mu     sync.Mutex
-
-	processorFactory       TProcessorFactory
-	serverTransport        TServerTransport
-	inputTransportFactory  TTransportFactory
-	outputTransportFactory TTransportFactory
-	inputProtocolFactory   TProtocolFactory
-	outputProtocolFactory  TProtocolFactory
-
-	// Headers to auto forward in THeaderProtocol
-	forwardHeaders []string
-
-	logger Logger
-}
-
-func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {
-	return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)
-}
-
-func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
-	return NewTSimpleServerFactory4(NewTProcessorFactory(processor),
-		serverTransport,
-		transportFactory,
-		protocolFactory,
-	)
-}
-
-func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
-	return NewTSimpleServerFactory6(NewTProcessorFactory(processor),
-		serverTransport,
-		inputTransportFactory,
-		outputTransportFactory,
-		inputProtocolFactory,
-		outputProtocolFactory,
-	)
-}
-
-func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {
-	return NewTSimpleServerFactory6(processorFactory,
-		serverTransport,
-		NewTTransportFactory(),
-		NewTTransportFactory(),
-		NewTBinaryProtocolFactoryDefault(),
-		NewTBinaryProtocolFactoryDefault(),
-	)
-}
-
-func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
-	return NewTSimpleServerFactory6(processorFactory,
-		serverTransport,
-		transportFactory,
-		transportFactory,
-		protocolFactory,
-		protocolFactory,
-	)
-}
-
-func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
-	return &TSimpleServer{
-		processorFactory:       processorFactory,
-		serverTransport:        serverTransport,
-		inputTransportFactory:  inputTransportFactory,
-		outputTransportFactory: outputTransportFactory,
-		inputProtocolFactory:   inputProtocolFactory,
-		outputProtocolFactory:  outputProtocolFactory,
-	}
-}
-
-func (p *TSimpleServer) ProcessorFactory() TProcessorFactory {
-	return p.processorFactory
-}
-
-func (p *TSimpleServer) ServerTransport() TServerTransport {
-	return p.serverTransport
-}
-
-func (p *TSimpleServer) InputTransportFactory() TTransportFactory {
-	return p.inputTransportFactory
-}
-
-func (p *TSimpleServer) OutputTransportFactory() TTransportFactory {
-	return p.outputTransportFactory
-}
-
-func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {
-	return p.inputProtocolFactory
-}
-
-func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {
-	return p.outputProtocolFactory
-}
-
-func (p *TSimpleServer) Listen() error {
-	return p.serverTransport.Listen()
-}
-
-// SetForwardHeaders sets the list of header keys that will be auto forwarded
-// while using THeaderProtocol.
-//
-// "forward" means that when the server is also a client to other upstream
-// thrift servers, the context object user gets in the processor functions will
-// have both read and write headers set, with write headers being forwarded.
-// Users can always override the write headers by calling SetWriteHeaderList
-// before calling thrift client functions.
-func (p *TSimpleServer) SetForwardHeaders(headers []string) {
-	size := len(headers)
-	if size == 0 {
-		p.forwardHeaders = nil
-		return
-	}
-
-	keys := make([]string, size)
-	copy(keys, headers)
-	p.forwardHeaders = keys
-}
-
-// SetLogger sets the logger used by this TSimpleServer.
-//
-// If no logger was set before Serve is called, a default logger using standard
-// log library will be used.
-func (p *TSimpleServer) SetLogger(logger Logger) {
-	p.logger = logger
-}
-
-func (p *TSimpleServer) innerAccept() (int32, error) {
-	client, err := p.serverTransport.Accept()
-	p.mu.Lock()
-	defer p.mu.Unlock()
-	closed := atomic.LoadInt32(&p.closed)
-	if closed != 0 {
-		return closed, nil
-	}
-	if err != nil {
-		return 0, err
-	}
-	if client != nil {
-		p.wg.Add(1)
-		go func() {
-			defer p.wg.Done()
-			if err := p.processRequests(client); err != nil {
-				p.logger(fmt.Sprintf("error processing request: %v", err))
-			}
-		}()
-	}
-	return 0, nil
-}
-
-func (p *TSimpleServer) AcceptLoop() error {
-	for {
-		closed, err := p.innerAccept()
-		if err != nil {
-			return err
-		}
-		if closed != 0 {
-			return nil
-		}
-	}
-}
-
-func (p *TSimpleServer) Serve() error {
-	p.logger = fallbackLogger(p.logger)
-
-	err := p.Listen()
-	if err != nil {
-		return err
-	}
-	p.AcceptLoop()
-	return nil
-}
-
-func (p *TSimpleServer) Stop() error {
-	p.mu.Lock()
-	defer p.mu.Unlock()
-	if atomic.LoadInt32(&p.closed) != 0 {
-		return nil
-	}
-	atomic.StoreInt32(&p.closed, 1)
-	p.serverTransport.Interrupt()
-	p.wg.Wait()
-	return nil
-}
-
-// If err is actually EOF, return nil, otherwise return err as-is.
-func treatEOFErrorsAsNil(err error) error {
-	if err == nil {
-		return nil
-	}
-	if errors.Is(err, io.EOF) {
-		return nil
-	}
-	var te TTransportException
-	if errors.As(err, &te) && te.TypeId() == END_OF_FILE {
-		return nil
-	}
-	return err
-}
-
-func (p *TSimpleServer) processRequests(client TTransport) (err error) {
-	defer func() {
-		err = treatEOFErrorsAsNil(err)
-	}()
-
-	processor := p.processorFactory.GetProcessor(client)
-	inputTransport, err := p.inputTransportFactory.GetTransport(client)
-	if err != nil {
-		return err
-	}
-	inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)
-	var outputTransport TTransport
-	var outputProtocol TProtocol
-
-	// for THeaderProtocol, we must use the same protocol instance for
-	// input and output so that the response is in the same dialect that
-	// the server detected the request was in.
-	headerProtocol, ok := inputProtocol.(*THeaderProtocol)
-	if ok {
-		outputProtocol = inputProtocol
-	} else {
-		oTrans, err := p.outputTransportFactory.GetTransport(client)
-		if err != nil {
-			return err
-		}
-		outputTransport = oTrans
-		outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport)
-	}
-
-	if inputTransport != nil {
-		defer inputTransport.Close()
-	}
-	if outputTransport != nil {
-		defer outputTransport.Close()
-	}
-	for {
-		if atomic.LoadInt32(&p.closed) != 0 {
-			return nil
-		}
-
-		ctx := SetResponseHelper(
-			defaultCtx,
-			TResponseHelper{
-				THeaderResponseHelper: NewTHeaderResponseHelper(outputProtocol),
-			},
-		)
-		if headerProtocol != nil {
-			// We need to call ReadFrame here, otherwise we won't
-			// get any headers on the AddReadTHeaderToContext call.
-			//
-			// ReadFrame is safe to be called multiple times so it
-			// won't break when it's called again later when we
-			// actually start to read the message.
-			if err := headerProtocol.ReadFrame(ctx); err != nil {
-				return err
-			}
-			ctx = AddReadTHeaderToContext(ctx, headerProtocol.GetReadHeaders())
-			ctx = SetWriteHeaderList(ctx, p.forwardHeaders)
-		}
-
-		ok, err := processor.Process(ctx, inputProtocol, outputProtocol)
-		if errors.Is(err, ErrAbandonRequest) {
-			return client.Close()
-		}
-		if errors.As(err, new(TTransportException)) && err != nil {
-			return err
-		}
-		var tae TApplicationException
-		if errors.As(err, &tae) && tae.TypeId() == UNKNOWN_METHOD {
-			continue
-		}
-		if !ok {
-			break
-		}
-	}
-	return nil
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go
deleted file mode 100644
index e911bf16681..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket.go
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"context"
-	"net"
-	"time"
-)
-
-type TSocket struct {
-	conn *socketConn
-	addr net.Addr
-	cfg  *TConfiguration
-
-	connectTimeout time.Duration
-	socketTimeout  time.Duration
-}
-
-// Deprecated: Use NewTSocketConf instead.
-func NewTSocket(hostPort string) (*TSocket, error) {
-	return NewTSocketConf(hostPort, &TConfiguration{
-		noPropagation: true,
-	})
-}
-
-// NewTSocketConf creates a net.Conn-backed TTransport, given a host and port.
-//
-// Example:
-//
-//     trans, err := thrift.NewTSocketConf("localhost:9090", &TConfiguration{
-//         ConnectTimeout: time.Second, // Use 0 for no timeout
-//         SocketTimeout:  time.Second, // Use 0 for no timeout
-//     })
-func NewTSocketConf(hostPort string, conf *TConfiguration) (*TSocket, error) {
-	addr, err := net.ResolveTCPAddr("tcp", hostPort)
-	if err != nil {
-		return nil, err
-	}
-	return NewTSocketFromAddrConf(addr, conf), nil
-}
-
-// Deprecated: Use NewTSocketConf instead.
-func NewTSocketTimeout(hostPort string, connTimeout time.Duration, soTimeout time.Duration) (*TSocket, error) {
-	return NewTSocketConf(hostPort, &TConfiguration{
-		ConnectTimeout: connTimeout,
-		SocketTimeout:  soTimeout,
-
-		noPropagation: true,
-	})
-}
-
-// NewTSocketFromAddrConf creates a TSocket from a net.Addr
-func NewTSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSocket {
-	return &TSocket{
-		addr: addr,
-		cfg:  conf,
-	}
-}
-
-// Deprecated: Use NewTSocketFromAddrConf instead.
-func NewTSocketFromAddrTimeout(addr net.Addr, connTimeout time.Duration, soTimeout time.Duration) *TSocket {
-	return NewTSocketFromAddrConf(addr, &TConfiguration{
-		ConnectTimeout: connTimeout,
-		SocketTimeout:  soTimeout,
-
-		noPropagation: true,
-	})
-}
-
-// NewTSocketFromConnConf creates a TSocket from an existing net.Conn.
-func NewTSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSocket {
-	return &TSocket{
-		conn: wrapSocketConn(conn),
-		addr: conn.RemoteAddr(),
-		cfg:  conf,
-	}
-}
-
-// Deprecated: Use NewTSocketFromConnConf instead.
-func NewTSocketFromConnTimeout(conn net.Conn, socketTimeout time.Duration) *TSocket {
-	return NewTSocketFromConnConf(conn, &TConfiguration{
-		SocketTimeout: socketTimeout,
-
-		noPropagation: true,
-	})
-}
-
-// SetTConfiguration implements TConfigurationSetter.
-//
-// It can be used to set connect and socket timeouts.
-func (p *TSocket) SetTConfiguration(conf *TConfiguration) {
-	p.cfg = conf
-}
-
-// Sets the connect timeout
-func (p *TSocket) SetConnTimeout(timeout time.Duration) error {
-	if p.cfg == nil {
-		p.cfg = &TConfiguration{
-			noPropagation: true,
-		}
-	}
-	p.cfg.ConnectTimeout = timeout
-	return nil
-}
-
-// Sets the socket timeout
-func (p *TSocket) SetSocketTimeout(timeout time.Duration) error {
-	if p.cfg == nil {
-		p.cfg = &TConfiguration{
-			noPropagation: true,
-		}
-	}
-	p.cfg.SocketTimeout = timeout
-	return nil
-}
-
-func (p *TSocket) pushDeadline(read, write bool) {
-	var t time.Time
-	if timeout := p.cfg.GetSocketTimeout(); timeout > 0 {
-		t = time.Now().Add(time.Duration(timeout))
-	}
-	if read && write {
-		p.conn.SetDeadline(t)
-	} else if read {
-		p.conn.SetReadDeadline(t)
-	} else if write {
-		p.conn.SetWriteDeadline(t)
-	}
-}
-
-// Connects the socket, creating a new socket object if necessary.
-func (p *TSocket) Open() error {
-	if p.conn.isValid() {
-		return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
-	}
-	if p.addr == nil {
-		return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
-	}
-	if len(p.addr.Network()) == 0 {
-		return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
-	}
-	if len(p.addr.String()) == 0 {
-		return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
-	}
-	var err error
-	if p.conn, err = createSocketConnFromReturn(net.DialTimeout(
-		p.addr.Network(),
-		p.addr.String(),
-		p.cfg.GetConnectTimeout(),
-	)); err != nil {
-		return NewTTransportException(NOT_OPEN, err.Error())
-	}
-	return nil
-}
-
-// Retrieve the underlying net.Conn
-func (p *TSocket) Conn() net.Conn {
-	return p.conn
-}
-
-// Returns true if the connection is open
-func (p *TSocket) IsOpen() bool {
-	return p.conn.IsOpen()
-}
-
-// Closes the socket.
-func (p *TSocket) Close() error {
-	// Close the socket
-	if p.conn != nil {
-		err := p.conn.Close()
-		if err != nil {
-			return err
-		}
-		p.conn = nil
-	}
-	return nil
-}
-
-//Returns the remote address of the socket.
-func (p *TSocket) Addr() net.Addr {
-	return p.addr
-}
-
-func (p *TSocket) Read(buf []byte) (int, error) {
-	if !p.conn.isValid() {
-		return 0, NewTTransportException(NOT_OPEN, "Connection not open")
-	}
-	p.pushDeadline(true, false)
-	// NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between
-	// p.pushDeadline and p.conn.Read could cause the deadline set inside
-	// p.pushDeadline being reset, thus need to be avoided.
-	n, err := p.conn.Read(buf)
-	return n, NewTTransportExceptionFromError(err)
-}
-
-func (p *TSocket) Write(buf []byte) (int, error) {
-	if !p.conn.isValid() {
-		return 0, NewTTransportException(NOT_OPEN, "Connection not open")
-	}
-	p.pushDeadline(false, true)
-	return p.conn.Write(buf)
-}
-
-func (p *TSocket) Flush(ctx context.Context) error {
-	return nil
-}
-
-func (p *TSocket) Interrupt() error {
-	if !p.conn.isValid() {
-		return nil
-	}
-	return p.conn.Close()
-}
-
-func (p *TSocket) RemainingBytes() (num_bytes uint64) {
-	const maxSize = ^uint64(0)
-	return maxSize // the truth is, we just don't know unless framed is used
-}
-
-var _ TConfigurationSetter = (*TSocket)(nil)
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go
deleted file mode 100644
index c1cc30c6cc5..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_conn.go
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"net"
-)
-
-// socketConn is a wrapped net.Conn that tries to do connectivity check.
-type socketConn struct {
-	net.Conn
-
-	buffer [1]byte
-}
-
-var _ net.Conn = (*socketConn)(nil)
-
-// createSocketConnFromReturn is a language sugar to help create socketConn from
-// return values of functions like net.Dial, tls.Dial, net.Listener.Accept, etc.
-func createSocketConnFromReturn(conn net.Conn, err error) (*socketConn, error) {
-	if err != nil {
-		return nil, err
-	}
-	return &socketConn{
-		Conn: conn,
-	}, nil
-}
-
-// wrapSocketConn wraps an existing net.Conn into *socketConn.
-func wrapSocketConn(conn net.Conn) *socketConn {
-	// In case conn is already wrapped,
-	// return it as-is and avoid double wrapping.
-	if sc, ok := conn.(*socketConn); ok {
-		return sc
-	}
-
-	return &socketConn{
-		Conn: conn,
-	}
-}
-
-// isValid checks whether there's a valid connection.
-//
-// It's nil safe, and returns false if sc itself is nil, or if the underlying
-// connection is nil.
-//
-// It's the same as the previous implementation of TSocket.IsOpen and
-// TSSLSocket.IsOpen before we added connectivity check.
-func (sc *socketConn) isValid() bool {
-	return sc != nil && sc.Conn != nil
-}
-
-// IsOpen checks whether the connection is open.
-//
-// It's nil safe, and returns false if sc itself is nil, or if the underlying
-// connection is nil.
-//
-// Otherwise, it tries to do a connectivity check and returns the result.
-//
-// It also has the side effect of resetting the previously set read deadline on
-// the socket. As a result, it shouldn't be called between setting read deadline
-// and doing actual read.
-func (sc *socketConn) IsOpen() bool {
-	if !sc.isValid() {
-		return false
-	}
-	return sc.checkConn() == nil
-}
-
-// Read implements io.Reader.
-//
-// On Windows, it behaves the same as the underlying net.Conn.Read.
-//
-// On non-Windows, it treats len(p) == 0 as a connectivity check instead of
-// readability check, which means instead of blocking until there's something to
-// read (readability check), or always return (0, nil) (the default behavior of
-// go's stdlib implementation on non-Windows), it never blocks, and will return
-// an error if the connection is lost.
-func (sc *socketConn) Read(p []byte) (n int, err error) {
-	if len(p) == 0 {
-		return 0, sc.read0()
-	}
-
-	return sc.Conn.Read(p)
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go
deleted file mode 100644
index f5fab3ab653..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_unix_conn.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// +build !windows
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"errors"
-	"io"
-	"syscall"
-	"time"
-)
-
-// We rely on this variable to be the zero time,
-// but define it as global variable to avoid repetitive allocations.
-// Please DO NOT mutate this variable in any way.
-var zeroTime time.Time
-
-func (sc *socketConn) read0() error {
-	return sc.checkConn()
-}
-
-func (sc *socketConn) checkConn() error {
-	syscallConn, ok := sc.Conn.(syscall.Conn)
-	if !ok {
-		// No way to check, return nil
-		return nil
-	}
-
-	// The reading about to be done here is non-blocking so we don't really
-	// need a read deadline. We just need to clear the previously set read
-	// deadline, if any.
-	sc.Conn.SetReadDeadline(zeroTime)
-
-	rc, err := syscallConn.SyscallConn()
-	if err != nil {
-		return err
-	}
-
-	var n int
-
-	if readErr := rc.Read(func(fd uintptr) bool {
-		n, _, err = syscall.Recvfrom(int(fd), sc.buffer[:], syscall.MSG_PEEK|syscall.MSG_DONTWAIT)
-		return true
-	}); readErr != nil {
-		return readErr
-	}
-
-	if n > 0 {
-		// We got something, which means we are good
-		return nil
-	}
-
-	if errors.Is(err, syscall.EAGAIN) || errors.Is(err, syscall.EWOULDBLOCK) {
-		// This means the connection is still open but we don't have
-		// anything to read right now.
-		return nil
-	}
-
-	if err != nil {
-		return err
-	}
-
-	// At this point, it means the other side already closed the connection.
-	return io.EOF
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go
deleted file mode 100644
index 679838c3b64..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/socket_windows_conn.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// +build windows
-
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-func (sc *socketConn) read0() error {
-	// On windows, we fallback to the default behavior of reading 0 bytes.
-	var p []byte
-	_, err := sc.Conn.Read(p)
-	return err
-}
-
-func (sc *socketConn) checkConn() error {
-	// On windows, we always return nil for this check.
-	return nil
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go
deleted file mode 100644
index 907afca326f..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_server_socket.go
+++ /dev/null
@@ -1,112 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"crypto/tls"
-	"net"
-	"time"
-)
-
-type TSSLServerSocket struct {
-	listener      net.Listener
-	addr          net.Addr
-	clientTimeout time.Duration
-	interrupted   bool
-	cfg           *tls.Config
-}
-
-func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) {
-	return NewTSSLServerSocketTimeout(listenAddr, cfg, 0)
-}
-
-func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) {
-	if cfg.MinVersion == 0 {
-		cfg.MinVersion = tls.VersionTLS10
-	}
-	addr, err := net.ResolveTCPAddr("tcp", listenAddr)
-	if err != nil {
-		return nil, err
-	}
-	return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil
-}
-
-func (p *TSSLServerSocket) Listen() error {
-	if p.IsListening() {
-		return nil
-	}
-	l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg)
-	if err != nil {
-		return err
-	}
-	p.listener = l
-	return nil
-}
-
-func (p *TSSLServerSocket) Accept() (TTransport, error) {
-	if p.interrupted {
-		return nil, errTransportInterrupted
-	}
-	if p.listener == nil {
-		return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
-	}
-	conn, err := p.listener.Accept()
-	if err != nil {
-		return nil, NewTTransportExceptionFromError(err)
-	}
-	return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil
-}
-
-// Checks whether the socket is listening.
-func (p *TSSLServerSocket) IsListening() bool {
-	return p.listener != nil
-}
-
-// Connects the socket, creating a new socket object if necessary.
-func (p *TSSLServerSocket) Open() error {
-	if p.IsListening() {
-		return NewTTransportException(ALREADY_OPEN, "Server socket already open")
-	}
-	if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil {
-		return err
-	} else {
-		p.listener = l
-	}
-	return nil
-}
-
-func (p *TSSLServerSocket) Addr() net.Addr {
-	return p.addr
-}
-
-func (p *TSSLServerSocket) Close() error {
-	defer func() {
-		p.listener = nil
-	}()
-	if p.IsListening() {
-		return p.listener.Close()
-	}
-	return nil
-}
-
-func (p *TSSLServerSocket) Interrupt() error {
-	p.interrupted = true
-	return nil
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go
deleted file mode 100644
index 6359a74ceb2..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/ssl_socket.go
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"context"
-	"crypto/tls"
-	"net"
-	"time"
-)
-
-type TSSLSocket struct {
-	conn *socketConn
-	// hostPort contains host:port (e.g. "asdf.com:12345"). The field is
-	// only valid if addr is nil.
-	hostPort string
-	// addr is nil when hostPort is not "", and is only used when the
-	// TSSLSocket is constructed from a net.Addr.
-	addr net.Addr
-
-	cfg *TConfiguration
-}
-
-// NewTSSLSocketConf creates a net.Conn-backed TTransport, given a host and port.
-//
-// Example:
-//
-//     trans, err := thrift.NewTSSLSocketConf("localhost:9090", nil, &TConfiguration{
-//         ConnectTimeout: time.Second, // Use 0 for no timeout
-//         SocketTimeout:  time.Second, // Use 0 for no timeout
-//     })
-func NewTSSLSocketConf(hostPort string, conf *TConfiguration) (*TSSLSocket, error) {
-	if cfg := conf.GetTLSConfig(); cfg != nil && cfg.MinVersion == 0 {
-		cfg.MinVersion = tls.VersionTLS10
-	}
-	return &TSSLSocket{
-		hostPort: hostPort,
-		cfg:      conf,
-	}, nil
-}
-
-// Deprecated: Use NewTSSLSocketConf instead.
-func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) {
-	return NewTSSLSocketConf(hostPort, &TConfiguration{
-		TLSConfig: cfg,
-
-		noPropagation: true,
-	})
-}
-
-// Deprecated: Use NewTSSLSocketConf instead.
-func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) (*TSSLSocket, error) {
-	return NewTSSLSocketConf(hostPort, &TConfiguration{
-		ConnectTimeout: connectTimeout,
-		SocketTimeout:  socketTimeout,
-		TLSConfig:      cfg,
-
-		noPropagation: true,
-	})
-}
-
-// NewTSSLSocketFromAddrConf creates a TSSLSocket from a net.Addr.
-func NewTSSLSocketFromAddrConf(addr net.Addr, conf *TConfiguration) *TSSLSocket {
-	return &TSSLSocket{
-		addr: addr,
-		cfg:  conf,
-	}
-}
-
-// Deprecated: Use NewTSSLSocketFromAddrConf instead.
-func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, connectTimeout, socketTimeout time.Duration) *TSSLSocket {
-	return NewTSSLSocketFromAddrConf(addr, &TConfiguration{
-		ConnectTimeout: connectTimeout,
-		SocketTimeout:  socketTimeout,
-		TLSConfig:      cfg,
-
-		noPropagation: true,
-	})
-}
-
-// NewTSSLSocketFromConnConf creates a TSSLSocket from an existing net.Conn.
-func NewTSSLSocketFromConnConf(conn net.Conn, conf *TConfiguration) *TSSLSocket {
-	return &TSSLSocket{
-		conn: wrapSocketConn(conn),
-		addr: conn.RemoteAddr(),
-		cfg:  conf,
-	}
-}
-
-// Deprecated: Use NewTSSLSocketFromConnConf instead.
-func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, socketTimeout time.Duration) *TSSLSocket {
-	return NewTSSLSocketFromConnConf(conn, &TConfiguration{
-		SocketTimeout: socketTimeout,
-		TLSConfig:     cfg,
-
-		noPropagation: true,
-	})
-}
-
-// SetTConfiguration implements TConfigurationSetter.
-//
-// It can be used to change connect and socket timeouts.
-func (p *TSSLSocket) SetTConfiguration(conf *TConfiguration) {
-	p.cfg = conf
-}
-
-// Sets the connect timeout
-func (p *TSSLSocket) SetConnTimeout(timeout time.Duration) error {
-	if p.cfg == nil {
-		p.cfg = &TConfiguration{}
-	}
-	p.cfg.ConnectTimeout = timeout
-	return nil
-}
-
-// Sets the socket timeout
-func (p *TSSLSocket) SetSocketTimeout(timeout time.Duration) error {
-	if p.cfg == nil {
-		p.cfg = &TConfiguration{}
-	}
-	p.cfg.SocketTimeout = timeout
-	return nil
-}
-
-func (p *TSSLSocket) pushDeadline(read, write bool) {
-	var t time.Time
-	if timeout := p.cfg.GetSocketTimeout(); timeout > 0 {
-		t = time.Now().Add(time.Duration(timeout))
-	}
-	if read && write {
-		p.conn.SetDeadline(t)
-	} else if read {
-		p.conn.SetReadDeadline(t)
-	} else if write {
-		p.conn.SetWriteDeadline(t)
-	}
-}
-
-// Connects the socket, creating a new socket object if necessary.
-func (p *TSSLSocket) Open() error {
-	var err error
-	// If we have a hostname, we need to pass the hostname to tls.Dial for
-	// certificate hostname checks.
-	if p.hostPort != "" {
-		if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer(
-			&net.Dialer{
-				Timeout: p.cfg.GetConnectTimeout(),
-			},
-			"tcp",
-			p.hostPort,
-			p.cfg.GetTLSConfig(),
-		)); err != nil {
-			return NewTTransportException(NOT_OPEN, err.Error())
-		}
-	} else {
-		if p.conn.isValid() {
-			return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
-		}
-		if p.addr == nil {
-			return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
-		}
-		if len(p.addr.Network()) == 0 {
-			return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
-		}
-		if len(p.addr.String()) == 0 {
-			return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
-		}
-		if p.conn, err = createSocketConnFromReturn(tls.DialWithDialer(
-			&net.Dialer{
-				Timeout: p.cfg.GetConnectTimeout(),
-			},
-			p.addr.Network(),
-			p.addr.String(),
-			p.cfg.GetTLSConfig(),
-		)); err != nil {
-			return NewTTransportException(NOT_OPEN, err.Error())
-		}
-	}
-	return nil
-}
-
-// Retrieve the underlying net.Conn
-func (p *TSSLSocket) Conn() net.Conn {
-	return p.conn
-}
-
-// Returns true if the connection is open
-func (p *TSSLSocket) IsOpen() bool {
-	return p.conn.IsOpen()
-}
-
-// Closes the socket.
-func (p *TSSLSocket) Close() error {
-	// Close the socket
-	if p.conn != nil {
-		err := p.conn.Close()
-		if err != nil {
-			return err
-		}
-		p.conn = nil
-	}
-	return nil
-}
-
-func (p *TSSLSocket) Read(buf []byte) (int, error) {
-	if !p.conn.isValid() {
-		return 0, NewTTransportException(NOT_OPEN, "Connection not open")
-	}
-	p.pushDeadline(true, false)
-	// NOTE: Calling any of p.IsOpen, p.conn.read0, or p.conn.IsOpen between
-	// p.pushDeadline and p.conn.Read could cause the deadline set inside
-	// p.pushDeadline being reset, thus need to be avoided.
-	n, err := p.conn.Read(buf)
-	return n, NewTTransportExceptionFromError(err)
-}
-
-func (p *TSSLSocket) Write(buf []byte) (int, error) {
-	if !p.conn.isValid() {
-		return 0, NewTTransportException(NOT_OPEN, "Connection not open")
-	}
-	p.pushDeadline(false, true)
-	return p.conn.Write(buf)
-}
-
-func (p *TSSLSocket) Flush(ctx context.Context) error {
-	return nil
-}
-
-func (p *TSSLSocket) Interrupt() error {
-	if !p.conn.isValid() {
-		return nil
-	}
-	return p.conn.Close()
-}
-
-func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) {
-	const maxSize = ^uint64(0)
-	return maxSize // the truth is, we just don't know unless framed is used
-}
-
-var _ TConfigurationSetter = (*TSSLSocket)(nil)
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go
deleted file mode 100644
index d68d0b3179c..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"context"
-	"errors"
-	"io"
-)
-
-var errTransportInterrupted = errors.New("Transport Interrupted")
-
-type Flusher interface {
-	Flush() (err error)
-}
-
-type ContextFlusher interface {
-	Flush(ctx context.Context) (err error)
-}
-
-type ReadSizeProvider interface {
-	RemainingBytes() (num_bytes uint64)
-}
-
-// Encapsulates the I/O layer
-type TTransport interface {
-	io.ReadWriteCloser
-	ContextFlusher
-	ReadSizeProvider
-
-	// Opens the transport for communication
-	Open() error
-
-	// Returns true if the transport is open
-	IsOpen() bool
-}
-
-type stringWriter interface {
-	WriteString(s string) (n int, err error)
-}
-
-// This is "enhanced" transport with extra capabilities. You need to use one of these
-// to construct protocol.
-// Notably, TSocket does not implement this interface, and it is always a mistake to use
-// TSocket directly in protocol.
-type TRichTransport interface {
-	io.ReadWriter
-	io.ByteReader
-	io.ByteWriter
-	stringWriter
-	ContextFlusher
-	ReadSizeProvider
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go
deleted file mode 100644
index 0a3f07646d3..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_exception.go
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-import (
-	"errors"
-	"io"
-)
-
-type timeoutable interface {
-	Timeout() bool
-}
-
-// Thrift Transport exception
-type TTransportException interface {
-	TException
-	TypeId() int
-	Err() error
-}
-
-const (
-	UNKNOWN_TRANSPORT_EXCEPTION = 0
-	NOT_OPEN                    = 1
-	ALREADY_OPEN                = 2
-	TIMED_OUT                   = 3
-	END_OF_FILE                 = 4
-)
-
-type tTransportException struct {
-	typeId int
-	err    error
-	msg    string
-}
-
-var _ TTransportException = (*tTransportException)(nil)
-
-func (tTransportException) TExceptionType() TExceptionType {
-	return TExceptionTypeTransport
-}
-
-func (p *tTransportException) TypeId() int {
-	return p.typeId
-}
-
-func (p *tTransportException) Error() string {
-	return p.msg
-}
-
-func (p *tTransportException) Err() error {
-	return p.err
-}
-
-func (p *tTransportException) Unwrap() error {
-	return p.err
-}
-
-func (p *tTransportException) Timeout() bool {
-	return p.typeId == TIMED_OUT
-}
-
-func NewTTransportException(t int, e string) TTransportException {
-	return &tTransportException{
-		typeId: t,
-		err:    errors.New(e),
-		msg:    e,
-	}
-}
-
-func NewTTransportExceptionFromError(e error) TTransportException {
-	if e == nil {
-		return nil
-	}
-
-	if t, ok := e.(TTransportException); ok {
-		return t
-	}
-
-	te := &tTransportException{
-		typeId: UNKNOWN_TRANSPORT_EXCEPTION,
-		err:    e,
-		msg:    e.Error(),
-	}
-
-	if isTimeoutError(e) {
-		te.typeId = TIMED_OUT
-		return te
-	}
-
-	if errors.Is(e, io.EOF) {
-		te.typeId = END_OF_FILE
-		return te
-	}
-
-	return te
-}
-
-func prependTTransportException(prepend string, e TTransportException) TTransportException {
-	return &tTransportException{
-		typeId: e.TypeId(),
-		err:    e,
-		msg:    prepend + e.Error(),
-	}
-}
-
-// isTimeoutError returns true when err is an error caused by timeout.
-//
-// Note that this also includes TTransportException wrapped timeout errors.
-func isTimeoutError(err error) bool {
-	var t timeoutable
-	if errors.As(err, &t) {
-		return t.Timeout()
-	}
-	return false
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go
deleted file mode 100644
index c805807940a..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/transport_factory.go
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-// Factory class used to create wrapped instance of Transports.
-// This is used primarily in servers, which get Transports from
-// a ServerTransport and then may want to mutate them (i.e. create
-// a BufferedTransport from the underlying base transport)
-type TTransportFactory interface {
-	GetTransport(trans TTransport) (TTransport, error)
-}
-
-type tTransportFactory struct{}
-
-// Return a wrapped instance of the base Transport.
-func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
-	return trans, nil
-}
-
-func NewTTransportFactory() TTransportFactory {
-	return &tTransportFactory{}
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go
deleted file mode 100644
index b24f1b05c45..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/type.go
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package thrift
-
-// Type constants in the Thrift protocol
-type TType byte
-
-const (
-	STOP   = 0
-	VOID   = 1
-	BOOL   = 2
-	BYTE   = 3
-	I08    = 3
-	DOUBLE = 4
-	I16    = 6
-	I32    = 8
-	I64    = 10
-	STRING = 11
-	UTF7   = 11
-	STRUCT = 12
-	MAP    = 13
-	SET    = 14
-	LIST   = 15
-	UTF8   = 16
-	UTF16  = 17
-	//BINARY = 18   wrong and unused
-)
-
-var typeNames = map[int]string{
-	STOP:   "STOP",
-	VOID:   "VOID",
-	BOOL:   "BOOL",
-	BYTE:   "BYTE",
-	DOUBLE: "DOUBLE",
-	I16:    "I16",
-	I32:    "I32",
-	I64:    "I64",
-	STRING: "STRING",
-	STRUCT: "STRUCT",
-	MAP:    "MAP",
-	SET:    "SET",
-	LIST:   "LIST",
-	UTF8:   "UTF8",
-	UTF16:  "UTF16",
-}
-
-func (p TType) String() string {
-	if s, ok := typeNames[int(p)]; ok {
-		return s
-	}
-	return "Unknown"
-}
diff --git a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go b/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go
deleted file mode 100644
index 259943a627c..00000000000
--- a/exporters/jaeger/internal/third_party/thrift/lib/go/thrift/zlib_transport.go
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements. See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership. The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License. You may obtain a copy of the License at
-*
-*   http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing,
-* software distributed under the License is distributed on an
-* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-* KIND, either express or implied. See the License for the
-* specific language governing permissions and limitations
-* under the License.
- */
-
-package thrift
-
-import (
-	"compress/zlib"
-	"context"
-	"io"
-)
-
-// TZlibTransportFactory is a factory for TZlibTransport instances
-type TZlibTransportFactory struct {
-	level   int
-	factory TTransportFactory
-}
-
-// TZlibTransport is a TTransport implementation that makes use of zlib compression.
-type TZlibTransport struct {
-	reader    io.ReadCloser
-	transport TTransport
-	writer    *zlib.Writer
-}
-
-// GetTransport constructs a new instance of NewTZlibTransport
-func (p *TZlibTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
-	if p.factory != nil {
-		// wrap other factory
-		var err error
-		trans, err = p.factory.GetTransport(trans)
-		if err != nil {
-			return nil, err
-		}
-	}
-	return NewTZlibTransport(trans, p.level)
-}
-
-// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory
-func NewTZlibTransportFactory(level int) *TZlibTransportFactory {
-	return &TZlibTransportFactory{level: level, factory: nil}
-}
-
-// NewTZlibTransportFactory constructs a new instance of TZlibTransportFactory
-// as a wrapper over existing transport factory
-func NewTZlibTransportFactoryWithFactory(level int, factory TTransportFactory) *TZlibTransportFactory {
-	return &TZlibTransportFactory{level: level, factory: factory}
-}
-
-// NewTZlibTransport constructs a new instance of TZlibTransport
-func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) {
-	w, err := zlib.NewWriterLevel(trans, level)
-	if err != nil {
-		return nil, err
-	}
-
-	return &TZlibTransport{
-		writer:    w,
-		transport: trans,
-	}, nil
-}
-
-// Close closes the reader and writer (flushing any unwritten data) and closes
-// the underlying transport.
-func (z *TZlibTransport) Close() error {
-	if z.reader != nil {
-		if err := z.reader.Close(); err != nil {
-			return err
-		}
-	}
-	if err := z.writer.Close(); err != nil {
-		return err
-	}
-	return z.transport.Close()
-}
-
-// Flush flushes the writer and its underlying transport.
-func (z *TZlibTransport) Flush(ctx context.Context) error {
-	if err := z.writer.Flush(); err != nil {
-		return err
-	}
-	return z.transport.Flush(ctx)
-}
-
-// IsOpen returns true if the transport is open
-func (z *TZlibTransport) IsOpen() bool {
-	return z.transport.IsOpen()
-}
-
-// Open opens the transport for communication
-func (z *TZlibTransport) Open() error {
-	return z.transport.Open()
-}
-
-func (z *TZlibTransport) Read(p []byte) (int, error) {
-	if z.reader == nil {
-		r, err := zlib.NewReader(z.transport)
-		if err != nil {
-			return 0, NewTTransportExceptionFromError(err)
-		}
-		z.reader = r
-	}
-
-	return z.reader.Read(p)
-}
-
-// RemainingBytes returns the size in bytes of the data that is still to be
-// read.
-func (z *TZlibTransport) RemainingBytes() uint64 {
-	return z.transport.RemainingBytes()
-}
-
-func (z *TZlibTransport) Write(p []byte) (int, error) {
-	return z.writer.Write(p)
-}
-
-// SetTConfiguration implements TConfigurationSetter for propagation.
-func (z *TZlibTransport) SetTConfiguration(conf *TConfiguration) {
-	PropagateTConfiguration(z.transport, conf)
-}
-
-var _ TConfigurationSetter = (*TZlibTransport)(nil)
diff --git a/exporters/jaeger/jaeger.go b/exporters/jaeger/jaeger.go
deleted file mode 100644
index 9b4a54afc66..00000000000
--- a/exporters/jaeger/jaeger.go
+++ /dev/null
@@ -1,360 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
-
-import (
-	"context"
-	"encoding/binary"
-	"encoding/json"
-	"fmt"
-	"sync"
-
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/codes"
-	gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
-	"go.opentelemetry.io/otel/sdk/resource"
-	sdktrace "go.opentelemetry.io/otel/sdk/trace"
-	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
-	"go.opentelemetry.io/otel/trace"
-)
-
-const (
-	keyInstrumentationLibraryName    = "otel.library.name"
-	keyInstrumentationLibraryVersion = "otel.library.version"
-	keyError                         = "error"
-	keySpanKind                      = "span.kind"
-	keyStatusCode                    = "otel.status_code"
-	keyStatusMessage                 = "otel.status_description"
-	keyDroppedAttributeCount         = "otel.event.dropped_attributes_count"
-	keyEventName                     = "event"
-)
-
-// New returns an OTel Exporter implementation that exports the collected
-// spans to Jaeger.
-func New(endpointOption EndpointOption) (*Exporter, error) {
-	uploader, err := endpointOption.newBatchUploader()
-	if err != nil {
-		return nil, err
-	}
-
-	// Fetch default service.name from default resource for backup
-	var defaultServiceName string
-	defaultResource := resource.Default()
-	if value, exists := defaultResource.Set().Value(semconv.ServiceNameKey); exists {
-		defaultServiceName = value.AsString()
-	}
-	if defaultServiceName == "" {
-		return nil, fmt.Errorf("failed to get service name from default resource")
-	}
-
-	stopCh := make(chan struct{})
-	e := &Exporter{
-		uploader:           uploader,
-		stopCh:             stopCh,
-		defaultServiceName: defaultServiceName,
-	}
-	return e, nil
-}
-
-// Exporter exports OpenTelemetry spans to a Jaeger agent or collector.
-type Exporter struct {
-	uploader           batchUploader
-	stopOnce           sync.Once
-	stopCh             chan struct{}
-	defaultServiceName string
-}
-
-var _ sdktrace.SpanExporter = (*Exporter)(nil)
-
-// ExportSpans transforms and exports OpenTelemetry spans to Jaeger.
-func (e *Exporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
-	// Return fast if context is already canceled or Exporter shutdown.
-	select {
-	case <-ctx.Done():
-		return ctx.Err()
-	case <-e.stopCh:
-		return nil
-	default:
-	}
-
-	// Cancel export if Exporter is shutdown.
-	var cancel context.CancelFunc
-	ctx, cancel = context.WithCancel(ctx)
-	defer cancel()
-	go func(ctx context.Context, cancel context.CancelFunc) {
-		select {
-		case <-ctx.Done():
-		case <-e.stopCh:
-			cancel()
-		}
-	}(ctx, cancel)
-
-	for _, batch := range jaegerBatchList(spans, e.defaultServiceName) {
-		if err := e.uploader.upload(ctx, batch); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// Shutdown stops the Exporter. This will close all connections and release
-// all resources held by the Exporter.
-func (e *Exporter) Shutdown(ctx context.Context) error {
-	// Stop any active and subsequent exports.
-	e.stopOnce.Do(func() { close(e.stopCh) })
-	select {
-	case <-ctx.Done():
-		return ctx.Err()
-	default:
-	}
-	return e.uploader.shutdown(ctx)
-}
-
-// MarshalLog is the marshaling function used by the logging system to represent this exporter.
-func (e *Exporter) MarshalLog() interface{} {
-	return struct {
-		Type string
-	}{
-		Type: "jaeger",
-	}
-}
-
-func spanToThrift(ss sdktrace.ReadOnlySpan) *gen.Span {
-	attr := ss.Attributes()
-	tags := make([]*gen.Tag, 0, len(attr))
-	for _, kv := range attr {
-		tag := keyValueToTag(kv)
-		if tag != nil {
-			tags = append(tags, tag)
-		}
-	}
-
-	if is := ss.InstrumentationScope(); is.Name != "" {
-		tags = append(tags, getStringTag(keyInstrumentationLibraryName, is.Name))
-		if is.Version != "" {
-			tags = append(tags, getStringTag(keyInstrumentationLibraryVersion, is.Version))
-		}
-	}
-
-	if ss.SpanKind() != trace.SpanKindInternal {
-		tags = append(tags,
-			getStringTag(keySpanKind, ss.SpanKind().String()),
-		)
-	}
-
-	if ss.Status().Code != codes.Unset {
-		switch ss.Status().Code {
-		case codes.Ok:
-			tags = append(tags, getStringTag(keyStatusCode, "OK"))
-		case codes.Error:
-			tags = append(tags, getBoolTag(keyError, true))
-			tags = append(tags, getStringTag(keyStatusCode, "ERROR"))
-		}
-		if ss.Status().Description != "" {
-			tags = append(tags, getStringTag(keyStatusMessage, ss.Status().Description))
-		}
-	}
-
-	var logs []*gen.Log
-	for _, a := range ss.Events() {
-		nTags := len(a.Attributes)
-		if a.Name != "" {
-			nTags++
-		}
-		if a.DroppedAttributeCount != 0 {
-			nTags++
-		}
-		fields := make([]*gen.Tag, 0, nTags)
-		if a.Name != "" {
-			// If an event contains an attribute with the same key, it needs
-			// to be given precedence and overwrite this.
-			fields = append(fields, getStringTag(keyEventName, a.Name))
-		}
-		for _, kv := range a.Attributes {
-			tag := keyValueToTag(kv)
-			if tag != nil {
-				fields = append(fields, tag)
-			}
-		}
-		if a.DroppedAttributeCount != 0 {
-			fields = append(fields, getInt64Tag(keyDroppedAttributeCount, int64(a.DroppedAttributeCount)))
-		}
-		logs = append(logs, &gen.Log{
-			Timestamp: a.Time.UnixNano() / 1000,
-			Fields:    fields,
-		})
-	}
-
-	var refs []*gen.SpanRef
-	for _, link := range ss.Links() {
-		tid := link.SpanContext.TraceID()
-		sid := link.SpanContext.SpanID()
-		refs = append(refs, &gen.SpanRef{
-			TraceIdHigh: int64(binary.BigEndian.Uint64(tid[0:8])),
-			TraceIdLow:  int64(binary.BigEndian.Uint64(tid[8:16])),
-			SpanId:      int64(binary.BigEndian.Uint64(sid[:])),
-			RefType:     gen.SpanRefType_FOLLOWS_FROM,
-		})
-	}
-
-	tid := ss.SpanContext().TraceID()
-	sid := ss.SpanContext().SpanID()
-	psid := ss.Parent().SpanID()
-	return &gen.Span{
-		TraceIdHigh:   int64(binary.BigEndian.Uint64(tid[0:8])),
-		TraceIdLow:    int64(binary.BigEndian.Uint64(tid[8:16])),
-		SpanId:        int64(binary.BigEndian.Uint64(sid[:])),
-		ParentSpanId:  int64(binary.BigEndian.Uint64(psid[:])),
-		OperationName: ss.Name(), // TODO: if span kind is added then add prefix "Sent"/"Recv"
-		Flags:         int32(ss.SpanContext().TraceFlags()),
-		StartTime:     ss.StartTime().UnixNano() / 1000,
-		Duration:      ss.EndTime().Sub(ss.StartTime()).Nanoseconds() / 1000,
-		Tags:          tags,
-		Logs:          logs,
-		References:    refs,
-	}
-}
-
-func keyValueToTag(keyValue attribute.KeyValue) *gen.Tag {
-	var tag *gen.Tag
-	switch keyValue.Value.Type() {
-	case attribute.STRING:
-		s := keyValue.Value.AsString()
-		tag = &gen.Tag{
-			Key:   string(keyValue.Key),
-			VStr:  &s,
-			VType: gen.TagType_STRING,
-		}
-	case attribute.BOOL:
-		b := keyValue.Value.AsBool()
-		tag = &gen.Tag{
-			Key:   string(keyValue.Key),
-			VBool: &b,
-			VType: gen.TagType_BOOL,
-		}
-	case attribute.INT64:
-		i := keyValue.Value.AsInt64()
-		tag = &gen.Tag{
-			Key:   string(keyValue.Key),
-			VLong: &i,
-			VType: gen.TagType_LONG,
-		}
-	case attribute.FLOAT64:
-		f := keyValue.Value.AsFloat64()
-		tag = &gen.Tag{
-			Key:     string(keyValue.Key),
-			VDouble: &f,
-			VType:   gen.TagType_DOUBLE,
-		}
-	case attribute.BOOLSLICE,
-		attribute.INT64SLICE,
-		attribute.FLOAT64SLICE,
-		attribute.STRINGSLICE:
-		data, _ := json.Marshal(keyValue.Value.AsInterface())
-		a := (string)(data)
-		tag = &gen.Tag{
-			Key:   string(keyValue.Key),
-			VStr:  &a,
-			VType: gen.TagType_STRING,
-		}
-	}
-	return tag
-}
-
-func getInt64Tag(k string, i int64) *gen.Tag {
-	return &gen.Tag{
-		Key:   k,
-		VLong: &i,
-		VType: gen.TagType_LONG,
-	}
-}
-
-func getStringTag(k, s string) *gen.Tag {
-	return &gen.Tag{
-		Key:   k,
-		VStr:  &s,
-		VType: gen.TagType_STRING,
-	}
-}
-
-func getBoolTag(k string, b bool) *gen.Tag {
-	return &gen.Tag{
-		Key:   k,
-		VBool: &b,
-		VType: gen.TagType_BOOL,
-	}
-}
-
-// jaegerBatchList transforms a slice of spans into a slice of jaeger Batch.
-func jaegerBatchList(ssl []sdktrace.ReadOnlySpan, defaultServiceName string) []*gen.Batch {
-	if len(ssl) == 0 {
-		return nil
-	}
-
-	batchDict := make(map[attribute.Distinct]*gen.Batch)
-
-	for _, ss := range ssl {
-		if ss == nil {
-			continue
-		}
-
-		resourceKey := ss.Resource().Equivalent()
-		batch, bOK := batchDict[resourceKey]
-		if !bOK {
-			batch = &gen.Batch{
-				Process: process(ss.Resource(), defaultServiceName),
-				Spans:   []*gen.Span{},
-			}
-		}
-		batch.Spans = append(batch.Spans, spanToThrift(ss))
-		batchDict[resourceKey] = batch
-	}
-
-	// Transform the categorized map into a slice
-	batchList := make([]*gen.Batch, 0, len(batchDict))
-	for _, batch := range batchDict {
-		batchList = append(batchList, batch)
-	}
-	return batchList
-}
-
-// process transforms an OTel Resource into a jaeger Process.
-func process(res *resource.Resource, defaultServiceName string) *gen.Process {
-	var process gen.Process
-
-	var serviceName attribute.KeyValue
-	if res != nil {
-		for iter := res.Iter(); iter.Next(); {
-			if iter.Attribute().Key == semconv.ServiceNameKey {
-				serviceName = iter.Attribute()
-				// Don't convert service.name into tag.
-				continue
-			}
-			if tag := keyValueToTag(iter.Attribute()); tag != nil {
-				process.Tags = append(process.Tags, tag)
-			}
-		}
-	}
-
-	// If no service.name is contained in a Span's Resource,
-	// that field MUST be populated from the default Resource.
-	if serviceName.Value.AsString() == "" {
-		serviceName = semconv.ServiceName(defaultServiceName)
-	}
-	process.ServiceName = serviceName.Value.AsString()
-
-	return &process
-}
diff --git a/exporters/jaeger/jaeger_benchmark_test.go b/exporters/jaeger/jaeger_benchmark_test.go
deleted file mode 100644
index ab96ac8c9ed..00000000000
--- a/exporters/jaeger/jaeger_benchmark_test.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
-	"context"
-	"fmt"
-	"testing"
-	"time"
-
-	"go.opentelemetry.io/otel/sdk/instrumentation"
-	tracesdk "go.opentelemetry.io/otel/sdk/trace"
-	"go.opentelemetry.io/otel/sdk/trace/tracetest"
-	"go.opentelemetry.io/otel/trace"
-)
-
-var (
-	traceID     trace.TraceID
-	spanID      trace.SpanID
-	spanContext trace.SpanContext
-
-	instrLibName = "benchmark.tests"
-)
-
-func init() {
-	var err error
-	traceID, err = trace.TraceIDFromHex("0102030405060708090a0b0c0d0e0f10")
-	if err != nil {
-		panic(err)
-	}
-	spanID, err = trace.SpanIDFromHex("0102030405060708")
-	if err != nil {
-		panic(err)
-	}
-	spanContext = trace.NewSpanContext(trace.SpanContextConfig{
-		TraceID: traceID,
-		SpanID:  spanID,
-	})
-}
-
-func spans(n int) []tracesdk.ReadOnlySpan {
-	now := time.Now()
-	s := make(tracetest.SpanStubs, n)
-	for i := 0; i < n; i++ {
-		name := fmt.Sprintf("span %d", i)
-		s[i] = tracetest.SpanStub{
-			SpanContext: spanContext,
-			Name:        name,
-			StartTime:   now,
-			EndTime:     now,
-			SpanKind:    trace.SpanKindClient,
-			InstrumentationLibrary: instrumentation.Library{
-				Name: instrLibName,
-			},
-		}
-	}
-	return s.Snapshots()
-}
-
-func benchmarkExportSpans(b *testing.B, o EndpointOption, i int) {
-	ctx := context.Background()
-	s := spans(i)
-	exp, err := New(o)
-	if err != nil {
-		b.Fatal(err)
-	}
-
-	b.ReportAllocs()
-	b.ResetTimer()
-
-	for n := 0; n < b.N; n++ {
-		if err := exp.ExportSpans(ctx, s); err != nil {
-			b.Error(err)
-		}
-	}
-}
-
-func benchmarkCollector(b *testing.B, i int) {
-	benchmarkExportSpans(b, withTestCollectorEndpoint(), i)
-}
-
-func benchmarkAgent(b *testing.B, i int) {
-	benchmarkExportSpans(b, WithAgentEndpoint(), i)
-}
-
-func BenchmarkCollectorExportSpans1(b *testing.B)     { benchmarkCollector(b, 1) }
-func BenchmarkCollectorExportSpans10(b *testing.B)    { benchmarkCollector(b, 10) }
-func BenchmarkCollectorExportSpans100(b *testing.B)   { benchmarkCollector(b, 100) }
-func BenchmarkCollectorExportSpans1000(b *testing.B)  { benchmarkCollector(b, 1000) }
-func BenchmarkCollectorExportSpans10000(b *testing.B) { benchmarkCollector(b, 10000) }
-func BenchmarkAgentExportSpans1(b *testing.B)         { benchmarkAgent(b, 1) }
-func BenchmarkAgentExportSpans10(b *testing.B)        { benchmarkAgent(b, 10) }
-func BenchmarkAgentExportSpans100(b *testing.B)       { benchmarkAgent(b, 100) }
-
-/*
-* BUG: These tests are not possible currently because the thrift payload size
-* does not fit in a UDP packet with the default size (65000) and will return
-* an error.
-
-func BenchmarkAgentExportSpans1000(b *testing.B)      { benchmarkAgent(b, 1000) }
-func BenchmarkAgentExportSpans10000(b *testing.B)     { benchmarkAgent(b, 10000) }
-
-*/
diff --git a/exporters/jaeger/jaeger_test.go b/exporters/jaeger/jaeger_test.go
deleted file mode 100644
index 312a7fc5ea1..00000000000
--- a/exporters/jaeger/jaeger_test.go
+++ /dev/null
@@ -1,723 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
-	"context"
-	"encoding/binary"
-	"fmt"
-	"os"
-	"sort"
-	"testing"
-	"time"
-
-	"github.com/google/go-cmp/cmp"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/codes"
-	gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
-	ottest "go.opentelemetry.io/otel/exporters/jaeger/internal/internaltest"
-	"go.opentelemetry.io/otel/sdk/instrumentation"
-	"go.opentelemetry.io/otel/sdk/resource"
-	sdktrace "go.opentelemetry.io/otel/sdk/trace"
-	"go.opentelemetry.io/otel/sdk/trace/tracetest"
-	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
-	"go.opentelemetry.io/otel/trace"
-)
-
-func TestNewRawExporter(t *testing.T) {
-	testCases := []struct {
-		name     string
-		endpoint EndpointOption
-	}{
-		{
-			name:     "default exporter with collector endpoint",
-			endpoint: WithCollectorEndpoint(),
-		},
-		{
-			name:     "default exporter with agent endpoint",
-			endpoint: WithAgentEndpoint(),
-		},
-	}
-
-	for _, tc := range testCases {
-		t.Run(tc.name, func(t *testing.T) {
-			_, err := New(tc.endpoint)
-			assert.NoError(t, err)
-		})
-	}
-}
-
-func TestNewRawExporterUseEnvVarIfOptionUnset(t *testing.T) {
-	// Record and restore env
-	envStore := ottest.NewEnvStore()
-	envStore.Record(envEndpoint)
-	defer func() {
-		require.NoError(t, envStore.Restore())
-	}()
-
-	// If the user sets the environment variable OTEL_EXPORTER_JAEGER_ENDPOINT, endpoint will always get a value.
-	require.NoError(t, os.Unsetenv(envEndpoint))
-	_, err := New(
-		WithCollectorEndpoint(),
-	)
-
-	assert.NoError(t, err)
-}
-
-type testCollectorEndpoint struct {
-	batchesUploaded []*gen.Batch
-}
-
-func (c *testCollectorEndpoint) shutdown(context.Context) error {
-	return nil
-}
-
-func (c *testCollectorEndpoint) upload(_ context.Context, batch *gen.Batch) error {
-	c.batchesUploaded = append(c.batchesUploaded, batch)
-	return nil
-}
-
-var _ batchUploader = (*testCollectorEndpoint)(nil)
-
-func withTestCollectorEndpoint() EndpointOption {
-	return endpointOptionFunc(func() (batchUploader, error) {
-		return &testCollectorEndpoint{}, nil
-	})
-}
-
-func withTestCollectorEndpointInjected(ce *testCollectorEndpoint) EndpointOption {
-	return endpointOptionFunc(func() (batchUploader, error) {
-		return ce, nil
-	})
-}
-
-func TestExporterExportSpan(t *testing.T) {
-	const (
-		serviceName = "test-service"
-		tagKey      = "key"
-		tagVal      = "val"
-	)
-
-	testCollector := &testCollectorEndpoint{}
-	exp, err := New(withTestCollectorEndpointInjected(testCollector))
-	require.NoError(t, err)
-	tp := sdktrace.NewTracerProvider(
-		sdktrace.WithBatcher(exp),
-		sdktrace.WithResource(resource.NewSchemaless(
-			semconv.ServiceName(serviceName),
-			attribute.String(tagKey, tagVal),
-		)),
-	)
-
-	tracer := tp.Tracer("test-tracer")
-
-	ctx := context.Background()
-	for i := 0; i < 3; i++ {
-		_, span := tracer.Start(ctx, fmt.Sprintf("test-span-%d", i))
-		span.End()
-		assert.True(t, span.SpanContext().IsValid())
-	}
-
-	require.NoError(t, tp.Shutdown(ctx))
-
-	batchesUploaded := testCollector.batchesUploaded
-	require.Len(t, batchesUploaded, 1)
-	uploadedBatch := batchesUploaded[0]
-	assert.Equal(t, serviceName, uploadedBatch.GetProcess().GetServiceName())
-	assert.Len(t, uploadedBatch.GetSpans(), 3)
-
-	require.Len(t, uploadedBatch.GetProcess().GetTags(), 1)
-	assert.Equal(t, tagKey, uploadedBatch.GetProcess().GetTags()[0].GetKey())
-	assert.Equal(t, tagVal, uploadedBatch.GetProcess().GetTags()[0].GetVStr())
-}
-
-func TestSpanSnapshotToThrift(t *testing.T) {
-	now := time.Now()
-	traceID, _ := trace.TraceIDFromHex("0102030405060708090a0b0c0d0e0f10")
-	spanID, _ := trace.SpanIDFromHex("0102030405060708")
-	parentSpanID, _ := trace.SpanIDFromHex("0807060504030201")
-
-	linkTraceID, _ := trace.TraceIDFromHex("0102030405060709090a0b0c0d0e0f11")
-	linkSpanID, _ := trace.SpanIDFromHex("0102030405060709")
-
-	eventNameValue := "event-test"
-	eventDropped := int64(10)
-	keyValue := "value"
-	statusCodeValue := "ERROR"
-	doubleValue := 123.456
-	intValue := int64(123)
-	boolTrue := true
-	arrValue := "[0,1,2,3]"
-	statusMessage := "this is a problem"
-	spanKind := "client"
-	rv1 := "rv11"
-	rv2 := int64(5)
-	instrLibName := "instrumentation-library"
-	instrLibVersion := "semver:1.0.0"
-
-	tests := []struct {
-		name string
-		data tracetest.SpanStub
-		want *gen.Span
-	}{
-		{
-			name: "no status description",
-			data: tracetest.SpanStub{
-				SpanContext: trace.NewSpanContext(trace.SpanContextConfig{
-					TraceID: traceID,
-					SpanID:  spanID,
-				}),
-				Name:      "/foo",
-				StartTime: now,
-				EndTime:   now,
-				Status:    sdktrace.Status{Code: codes.Error},
-				SpanKind:  trace.SpanKindClient,
-				InstrumentationLibrary: instrumentation.Library{
-					Name:    instrLibName,
-					Version: instrLibVersion,
-				},
-			},
-			want: &gen.Span{
-				TraceIdLow:    651345242494996240,
-				TraceIdHigh:   72623859790382856,
-				SpanId:        72623859790382856,
-				OperationName: "/foo",
-				StartTime:     now.UnixNano() / 1000,
-				Duration:      0,
-				Tags: []*gen.Tag{
-					{Key: keyError, VType: gen.TagType_BOOL, VBool: &boolTrue},
-					{Key: keyInstrumentationLibraryName, VType: gen.TagType_STRING, VStr: &instrLibName},
-					{Key: keyInstrumentationLibraryVersion, VType: gen.TagType_STRING, VStr: &instrLibVersion},
-					{Key: keyStatusCode, VType: gen.TagType_STRING, VStr: &statusCodeValue},
-					// Should not have a status message because it was unset
-					{Key: keySpanKind, VType: gen.TagType_STRING, VStr: &spanKind},
-				},
-			},
-		},
-		{
-			name: "no parent",
-			data: tracetest.SpanStub{
-				SpanContext: trace.NewSpanContext(trace.SpanContextConfig{
-					TraceID: traceID,
-					SpanID:  spanID,
-				}),
-				Name:      "/foo",
-				StartTime: now,
-				EndTime:   now,
-				Links: []sdktrace.Link{
-					{
-						SpanContext: trace.NewSpanContext(trace.SpanContextConfig{
-							TraceID: linkTraceID,
-							SpanID:  linkSpanID,
-						}),
-					},
-				},
-				Attributes: []attribute.KeyValue{
-					attribute.String("key", keyValue),
-					attribute.Float64("double", doubleValue),
-					attribute.Int64("int", intValue),
-				},
-				Events: []sdktrace.Event{
-					{
-						Name:                  eventNameValue,
-						Attributes:            []attribute.KeyValue{attribute.String("k1", keyValue)},
-						DroppedAttributeCount: int(eventDropped),
-						Time:                  now,
-					},
-				},
-				Status: sdktrace.Status{
-					Code:        codes.Error,
-					Description: statusMessage,
-				},
-				SpanKind: trace.SpanKindClient,
-				InstrumentationLibrary: instrumentation.Library{
-					Name:    instrLibName,
-					Version: instrLibVersion,
-				},
-			},
-			want: &gen.Span{
-				TraceIdLow:    651345242494996240,
-				TraceIdHigh:   72623859790382856,
-				SpanId:        72623859790382856,
-				OperationName: "/foo",
-				StartTime:     now.UnixNano() / 1000,
-				Duration:      0,
-				Tags: []*gen.Tag{
-					{Key: "double", VType: gen.TagType_DOUBLE, VDouble: &doubleValue},
-					{Key: "key", VType: gen.TagType_STRING, VStr: &keyValue},
-					{Key: "int", VType: gen.TagType_LONG, VLong: &intValue},
-					{Key: keyError, VType: gen.TagType_BOOL, VBool: &boolTrue},
-					{Key: keyInstrumentationLibraryName, VType: gen.TagType_STRING, VStr: &instrLibName},
-					{Key: keyInstrumentationLibraryVersion, VType: gen.TagType_STRING, VStr: &instrLibVersion},
-					{Key: keyStatusCode, VType: gen.TagType_STRING, VStr: &statusCodeValue},
-					{Key: keyStatusMessage, VType: gen.TagType_STRING, VStr: &statusMessage},
-					{Key: keySpanKind, VType: gen.TagType_STRING, VStr: &spanKind},
-				},
-				References: []*gen.SpanRef{
-					{
-						RefType:     gen.SpanRefType_FOLLOWS_FROM,
-						TraceIdHigh: int64(binary.BigEndian.Uint64(linkTraceID[0:8])),
-						TraceIdLow:  int64(binary.BigEndian.Uint64(linkTraceID[8:16])),
-						SpanId:      int64(binary.BigEndian.Uint64(linkSpanID[:])),
-					},
-				},
-				Logs: []*gen.Log{
-					{
-						Timestamp: now.UnixNano() / 1000,
-						Fields: []*gen.Tag{
-							{
-								Key:   keyEventName,
-								VStr:  &eventNameValue,
-								VType: gen.TagType_STRING,
-							},
-							{
-								Key:   "k1",
-								VStr:  &keyValue,
-								VType: gen.TagType_STRING,
-							},
-							{
-								Key:   keyDroppedAttributeCount,
-								VLong: &eventDropped,
-								VType: gen.TagType_LONG,
-							},
-						},
-					},
-				},
-			},
-		},
-		{
-			name: "with parent",
-			data: tracetest.SpanStub{
-				SpanContext: trace.NewSpanContext(trace.SpanContextConfig{
-					TraceID: traceID,
-					SpanID:  spanID,
-				}),
-				Parent: trace.NewSpanContext(trace.SpanContextConfig{
-					TraceID: traceID,
-					SpanID:  parentSpanID,
-				}),
-				Links: []sdktrace.Link{
-					{
-						SpanContext: trace.NewSpanContext(trace.SpanContextConfig{
-							TraceID: linkTraceID,
-							SpanID:  linkSpanID,
-						}),
-					},
-				},
-				Name:      "/foo",
-				StartTime: now,
-				EndTime:   now,
-				Attributes: []attribute.KeyValue{
-					attribute.IntSlice("arr", []int{0, 1, 2, 3}),
-				},
-				Status: sdktrace.Status{
-					Code:        codes.Unset,
-					Description: statusMessage,
-				},
-				SpanKind: trace.SpanKindInternal,
-				InstrumentationLibrary: instrumentation.Library{
-					Name:    instrLibName,
-					Version: instrLibVersion,
-				},
-			},
-			want: &gen.Span{
-				TraceIdLow:    651345242494996240,
-				TraceIdHigh:   72623859790382856,
-				SpanId:        72623859790382856,
-				ParentSpanId:  578437695752307201,
-				OperationName: "/foo",
-				StartTime:     now.UnixNano() / 1000,
-				Duration:      0,
-				Tags: []*gen.Tag{
-					// status code, message and span kind should NOT be populated
-					{Key: "arr", VType: gen.TagType_STRING, VStr: &arrValue},
-					{Key: keyInstrumentationLibraryName, VType: gen.TagType_STRING, VStr: &instrLibName},
-					{Key: keyInstrumentationLibraryVersion, VType: gen.TagType_STRING, VStr: &instrLibVersion},
-				},
-				References: []*gen.SpanRef{
-					{
-						RefType:     gen.SpanRefType_FOLLOWS_FROM,
-						TraceIdHigh: int64(binary.BigEndian.Uint64(linkTraceID[0:8])),
-						TraceIdLow:  int64(binary.BigEndian.Uint64(linkTraceID[8:16])),
-						SpanId:      int64(binary.BigEndian.Uint64(linkSpanID[:])),
-					},
-				},
-			},
-		},
-		{
-			name: "resources do not affect the tags",
-			data: tracetest.SpanStub{
-				SpanContext: trace.NewSpanContext(trace.SpanContextConfig{
-					TraceID: traceID,
-					SpanID:  spanID,
-				}),
-				Parent: trace.NewSpanContext(trace.SpanContextConfig{
-					TraceID: traceID,
-					SpanID:  parentSpanID,
-				}),
-				Name:      "/foo",
-				StartTime: now,
-				EndTime:   now,
-				Resource: resource.NewSchemaless(
-					attribute.String("rk1", rv1),
-					attribute.Int64("rk2", rv2),
-					semconv.ServiceName("service name"),
-				),
-				Status: sdktrace.Status{
-					Code:        codes.Unset,
-					Description: statusMessage,
-				},
-				SpanKind: trace.SpanKindInternal,
-				InstrumentationLibrary: instrumentation.Library{
-					Name:    instrLibName,
-					Version: instrLibVersion,
-				},
-			},
-			want: &gen.Span{
-				TraceIdLow:    651345242494996240,
-				TraceIdHigh:   72623859790382856,
-				SpanId:        72623859790382856,
-				ParentSpanId:  578437695752307201,
-				OperationName: "/foo",
-				StartTime:     now.UnixNano() / 1000,
-				Duration:      0,
-				Tags: []*gen.Tag{
-					{Key: keyInstrumentationLibraryName, VType: gen.TagType_STRING, VStr: &instrLibName},
-					{Key: keyInstrumentationLibraryVersion, VType: gen.TagType_STRING, VStr: &instrLibVersion},
-				},
-			},
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			got := spanToThrift(tt.data.Snapshot())
-			sort.Slice(got.Tags, func(i, j int) bool {
-				return got.Tags[i].Key < got.Tags[j].Key
-			})
-			sort.Slice(tt.want.Tags, func(i, j int) bool {
-				return tt.want.Tags[i].Key < tt.want.Tags[j].Key
-			})
-			if diff := cmp.Diff(got, tt.want); diff != "" {
-				t.Errorf("Diff%v", diff)
-			}
-		})
-	}
-}
-
-func TestExporterShutdownHonorsCancel(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	cancel()
-
-	e, err := New(withTestCollectorEndpoint())
-	require.NoError(t, err)
-	assert.EqualError(t, e.Shutdown(ctx), context.Canceled.Error())
-}
-
-func TestExporterShutdownHonorsTimeout(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
-	<-ctx.Done()
-
-	e, err := New(withTestCollectorEndpoint())
-	require.NoError(t, err)
-	assert.EqualError(t, e.Shutdown(ctx), context.DeadlineExceeded.Error())
-	cancel()
-}
-
-func TestErrorOnExportShutdownExporter(t *testing.T) {
-	e, err := New(withTestCollectorEndpoint())
-	require.NoError(t, err)
-	assert.NoError(t, e.Shutdown(context.Background()))
-	assert.NoError(t, e.ExportSpans(context.Background(), nil))
-}
-
-func TestExporterExportSpansHonorsCancel(t *testing.T) {
-	e, err := New(withTestCollectorEndpoint())
-	require.NoError(t, err)
-	now := time.Now()
-	ss := tracetest.SpanStubs{
-		{
-			Name: "s1",
-			Resource: resource.NewSchemaless(
-				semconv.ServiceName("name"),
-				attribute.Key("r1").String("v1"),
-			),
-			StartTime: now,
-			EndTime:   now,
-		},
-		{
-			Name: "s2",
-			Resource: resource.NewSchemaless(
-				semconv.ServiceName("name"),
-				attribute.Key("r2").String("v2"),
-			),
-			StartTime: now,
-			EndTime:   now,
-		},
-	}
-	ctx, cancel := context.WithCancel(context.Background())
-	cancel()
-
-	assert.EqualError(t, e.ExportSpans(ctx, ss.Snapshots()), context.Canceled.Error())
-}
-
-func TestExporterExportSpansHonorsTimeout(t *testing.T) {
-	e, err := New(withTestCollectorEndpoint())
-	require.NoError(t, err)
-	now := time.Now()
-	ss := tracetest.SpanStubs{
-		{
-			Name: "s1",
-			Resource: resource.NewSchemaless(
-				semconv.ServiceName("name"),
-				attribute.Key("r1").String("v1"),
-			),
-			StartTime: now,
-			EndTime:   now,
-		},
-		{
-			Name: "s2",
-			Resource: resource.NewSchemaless(
-				semconv.ServiceName("name"),
-				attribute.Key("r2").String("v2"),
-			),
-			StartTime: now,
-			EndTime:   now,
-		},
-	}
-	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
-	defer cancel()
-	<-ctx.Done()
-
-	assert.EqualError(t, e.ExportSpans(ctx, ss.Snapshots()), context.DeadlineExceeded.Error())
-}
-
-func TestJaegerBatchList(t *testing.T) {
-	newString := func(value string) *string {
-		return &value
-	}
-	spanKind := "unspecified"
-	now := time.Now()
-
-	testCases := []struct {
-		name               string
-		roSpans            []sdktrace.ReadOnlySpan
-		defaultServiceName string
-		expectedBatchList  []*gen.Batch
-	}{
-		{
-			name:              "no span shots",
-			roSpans:           nil,
-			expectedBatchList: nil,
-		},
-		{
-			name: "span's snapshot contains nil span",
-			roSpans: []sdktrace.ReadOnlySpan{
-				tracetest.SpanStub{
-					Name: "s1",
-					Resource: resource.NewSchemaless(
-						semconv.ServiceName("name"),
-						attribute.Key("r1").String("v1"),
-					),
-					StartTime: now,
-					EndTime:   now,
-				}.Snapshot(),
-				nil,
-			},
-			expectedBatchList: []*gen.Batch{
-				{
-					Process: &gen.Process{
-						ServiceName: "name",
-						Tags: []*gen.Tag{
-							{Key: "r1", VType: gen.TagType_STRING, VStr: newString("v1")},
-						},
-					},
-					Spans: []*gen.Span{
-						{
-							OperationName: "s1",
-							Tags: []*gen.Tag{
-								{Key: keySpanKind, VType: gen.TagType_STRING, VStr: &spanKind},
-							},
-							StartTime: now.UnixNano() / 1000,
-						},
-					},
-				},
-			},
-		},
-		{
-			name: "merge spans that have the same resources",
-			roSpans: tracetest.SpanStubs{
-				{
-					Name: "s1",
-					Resource: resource.NewSchemaless(
-						semconv.ServiceName("name"),
-						attribute.Key("r1").String("v1"),
-					),
-					StartTime: now,
-					EndTime:   now,
-				},
-				{
-					Name: "s2",
-					Resource: resource.NewSchemaless(
-						semconv.ServiceName("name"),
-						attribute.Key("r1").String("v1"),
-					),
-					StartTime: now,
-					EndTime:   now,
-				},
-				{
-					Name: "s3",
-					Resource: resource.NewSchemaless(
-						semconv.ServiceName("name"),
-						attribute.Key("r2").String("v2"),
-					),
-					StartTime: now,
-					EndTime:   now,
-				},
-			}.Snapshots(),
-			expectedBatchList: []*gen.Batch{
-				{
-					Process: &gen.Process{
-						ServiceName: "name",
-						Tags: []*gen.Tag{
-							{Key: "r1", VType: gen.TagType_STRING, VStr: newString("v1")},
-						},
-					},
-					Spans: []*gen.Span{
-						{
-							OperationName: "s1",
-							Tags: []*gen.Tag{
-								{Key: "span.kind", VType: gen.TagType_STRING, VStr: &spanKind},
-							},
-							StartTime: now.UnixNano() / 1000,
-						},
-						{
-							OperationName: "s2",
-							Tags: []*gen.Tag{
-								{Key: "span.kind", VType: gen.TagType_STRING, VStr: &spanKind},
-							},
-							StartTime: now.UnixNano() / 1000,
-						},
-					},
-				},
-				{
-					Process: &gen.Process{
-						ServiceName: "name",
-						Tags: []*gen.Tag{
-							{Key: "r2", VType: gen.TagType_STRING, VStr: newString("v2")},
-						},
-					},
-					Spans: []*gen.Span{
-						{
-							OperationName: "s3",
-							Tags: []*gen.Tag{
-								{Key: "span.kind", VType: gen.TagType_STRING, VStr: &spanKind},
-							},
-							StartTime: now.UnixNano() / 1000,
-						},
-					},
-				},
-			},
-		},
-		{
-			name: "no service name in spans",
-			roSpans: tracetest.SpanStubs{
-				{
-					Name: "s1",
-					Resource: resource.NewSchemaless(
-						attribute.Key("r1").String("v1"),
-					),
-					StartTime: now,
-					EndTime:   now,
-				},
-			}.Snapshots(),
-			defaultServiceName: "default service name",
-			expectedBatchList: []*gen.Batch{
-				{
-					Process: &gen.Process{
-						ServiceName: "default service name",
-						Tags: []*gen.Tag{
-							{Key: "r1", VType: gen.TagType_STRING, VStr: newString("v1")},
-						},
-					},
-					Spans: []*gen.Span{
-						{
-							OperationName: "s1",
-							Tags: []*gen.Tag{
-								{Key: "span.kind", VType: gen.TagType_STRING, VStr: &spanKind},
-							},
-							StartTime: now.UnixNano() / 1000,
-						},
-					},
-				},
-			},
-		},
-	}
-
-	for _, tc := range testCases {
-		t.Run(tc.name, func(t *testing.T) {
-			batchList := jaegerBatchList(tc.roSpans, tc.defaultServiceName)
-
-			assert.ElementsMatch(t, tc.expectedBatchList, batchList)
-		})
-	}
-}
-
-func TestProcess(t *testing.T) {
-	v1 := "v1"
-
-	testCases := []struct {
-		name               string
-		res                *resource.Resource
-		defaultServiceName string
-		expectedProcess    *gen.Process
-	}{
-		{
-			name: "resources contain service name",
-			res: resource.NewSchemaless(
-				semconv.ServiceName("service name"),
-				attribute.Key("r1").String("v1"),
-			),
-			defaultServiceName: "default service name",
-			expectedProcess: &gen.Process{
-				ServiceName: "service name",
-				Tags: []*gen.Tag{
-					{Key: "r1", VType: gen.TagType_STRING, VStr: &v1},
-				},
-			},
-		},
-		{
-			name:               "resources don't have service name",
-			res:                resource.NewSchemaless(attribute.Key("r1").String("v1")),
-			defaultServiceName: "default service name",
-			expectedProcess: &gen.Process{
-				ServiceName: "default service name",
-				Tags: []*gen.Tag{
-					{Key: "r1", VType: gen.TagType_STRING, VStr: &v1},
-				},
-			},
-		},
-	}
-
-	for _, tc := range testCases {
-		t.Run(tc.name, func(t *testing.T) {
-			pro := process(tc.res, tc.defaultServiceName)
-
-			assert.Equal(t, tc.expectedProcess, pro)
-		})
-	}
-}
diff --git a/exporters/jaeger/reconnecting_udp_client.go b/exporters/jaeger/reconnecting_udp_client.go
deleted file mode 100644
index 88055c8a300..00000000000
--- a/exporters/jaeger/reconnecting_udp_client.go
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
-
-import (
-	"fmt"
-	"net"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/go-logr/logr"
-)
-
-// reconnectingUDPConn is an implementation of udpConn that resolves hostPort every resolveTimeout, if the resolved address is
-// different than the current conn then the new address is dialed and the conn is swapped.
-type reconnectingUDPConn struct {
-	// `sync/atomic` expects the first word in an allocated struct to be 64-bit
-	// aligned on both ARM and x86-32. See https://goo.gl/zW7dgq for more details.
-	bufferBytes int64
-	hostPort    string
-	resolveFunc resolveFunc
-	dialFunc    dialFunc
-	logger      logr.Logger
-
-	connMtx   sync.RWMutex
-	conn      *net.UDPConn
-	destAddr  *net.UDPAddr
-	closeChan chan struct{}
-}
-
-type resolveFunc func(network string, hostPort string) (*net.UDPAddr, error)
-type dialFunc func(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error)
-
-// newReconnectingUDPConn returns a new udpConn that resolves hostPort every resolveTimeout, if the resolved address is
-// different than the current conn then the new address is dialed and the conn is swapped.
-func newReconnectingUDPConn(hostPort string, bufferBytes int, resolveTimeout time.Duration, resolveFunc resolveFunc, dialFunc dialFunc, logger logr.Logger) (*reconnectingUDPConn, error) {
-	conn := &reconnectingUDPConn{
-		hostPort:    hostPort,
-		resolveFunc: resolveFunc,
-		dialFunc:    dialFunc,
-		logger:      logger,
-		closeChan:   make(chan struct{}),
-		bufferBytes: int64(bufferBytes),
-	}
-
-	if err := conn.attemptResolveAndDial(); err != nil {
-		conn.logf("failed resolving destination address on connection startup, with err: %q. retrying in %s", err.Error(), resolveTimeout)
-	}
-
-	go conn.reconnectLoop(resolveTimeout)
-
-	return conn, nil
-}
-
-func (c *reconnectingUDPConn) logf(format string, args ...interface{}) {
-	if c.logger != emptyLogger {
-		c.logger.Info(format, args...)
-	}
-}
-
-func (c *reconnectingUDPConn) reconnectLoop(resolveTimeout time.Duration) {
-	ticker := time.NewTicker(resolveTimeout)
-	defer ticker.Stop()
-
-	for {
-		select {
-		case <-c.closeChan:
-			return
-		case <-ticker.C:
-			if err := c.attemptResolveAndDial(); err != nil {
-				c.logf("%s", err.Error())
-			}
-		}
-	}
-}
-
-func (c *reconnectingUDPConn) attemptResolveAndDial() error {
-	newAddr, err := c.resolveFunc("udp", c.hostPort)
-	if err != nil {
-		return fmt.Errorf("failed to resolve new addr for host %q, with err: %w", c.hostPort, err)
-	}
-
-	c.connMtx.RLock()
-	curAddr := c.destAddr
-	c.connMtx.RUnlock()
-
-	// dont attempt dial if an addr was successfully dialed previously and, resolved addr is the same as current conn
-	if curAddr != nil && newAddr.String() == curAddr.String() {
-		return nil
-	}
-
-	if err := c.attemptDialNewAddr(newAddr); err != nil {
-		return fmt.Errorf("failed to dial newly resolved addr '%s', with err: %w", newAddr, err)
-	}
-
-	return nil
-}
-
-func (c *reconnectingUDPConn) attemptDialNewAddr(newAddr *net.UDPAddr) error {
-	connUDP, err := c.dialFunc(newAddr.Network(), nil, newAddr)
-	if err != nil {
-		return err
-	}
-
-	if bufferBytes := int(atomic.LoadInt64(&c.bufferBytes)); bufferBytes != 0 {
-		if err = connUDP.SetWriteBuffer(bufferBytes); err != nil {
-			return err
-		}
-	}
-
-	c.connMtx.Lock()
-	c.destAddr = newAddr
-	// store prev to close later
-	prevConn := c.conn
-	c.conn = connUDP
-	c.connMtx.Unlock()
-
-	if prevConn != nil {
-		return prevConn.Close()
-	}
-
-	return nil
-}
-
-// Write calls net.udpConn.Write, if it fails an attempt is made to connect to a new addr, if that succeeds the write is retried before returning.
-func (c *reconnectingUDPConn) Write(b []byte) (int, error) {
-	var bytesWritten int
-	var err error
-
-	c.connMtx.RLock()
-	conn := c.conn
-	c.connMtx.RUnlock()
-
-	if conn == nil {
-		// if connection is not initialized indicate this with err in order to hook into retry logic
-		err = fmt.Errorf("UDP connection not yet initialized, an address has not been resolved")
-	} else {
-		bytesWritten, err = conn.Write(b)
-	}
-
-	if err == nil {
-		return bytesWritten, nil
-	}
-
-	// attempt to resolve and dial new address in case that's the problem, if resolve and dial succeeds, try write again
-	if reconnErr := c.attemptResolveAndDial(); reconnErr == nil {
-		c.connMtx.RLock()
-		conn := c.conn
-		c.connMtx.RUnlock()
-
-		return conn.Write(b)
-	}
-
-	// return original error if reconn fails
-	return bytesWritten, err
-}
-
-// Close stops the reconnectLoop, then closes the connection via net.udpConn 's implementation.
-func (c *reconnectingUDPConn) Close() error {
-	close(c.closeChan)
-
-	// acquire rw lock before closing conn to ensure calls to Write drain
-	c.connMtx.Lock()
-	defer c.connMtx.Unlock()
-
-	if c.conn != nil {
-		return c.conn.Close()
-	}
-
-	return nil
-}
-
-// SetWriteBuffer defers to the net.udpConn SetWriteBuffer implementation wrapped with a RLock. if no conn is currently held
-// and SetWriteBuffer is called store bufferBytes to be set for new conns.
-func (c *reconnectingUDPConn) SetWriteBuffer(bytes int) error {
-	var err error
-
-	c.connMtx.RLock()
-	conn := c.conn
-	c.connMtx.RUnlock()
-
-	if conn != nil {
-		err = c.conn.SetWriteBuffer(bytes)
-	}
-
-	if err == nil {
-		atomic.StoreInt64(&c.bufferBytes, int64(bytes))
-	}
-
-	return err
-}
diff --git a/exporters/jaeger/reconnecting_udp_client_test.go b/exporters/jaeger/reconnecting_udp_client_test.go
deleted file mode 100644
index 8d29eaf5eb4..00000000000
--- a/exporters/jaeger/reconnecting_udp_client_test.go
+++ /dev/null
@@ -1,501 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger
-
-import (
-	"context"
-	"crypto/rand"
-	"fmt"
-	"net"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/mock"
-	"github.com/stretchr/testify/require"
-)
-
-type mockResolver struct {
-	mock.Mock
-}
-
-func (m *mockResolver) ResolveUDPAddr(network string, hostPort string) (*net.UDPAddr, error) {
-	args := m.Called(network, hostPort)
-
-	a0 := args.Get(0)
-	if a0 == nil {
-		return (*net.UDPAddr)(nil), args.Error(1)
-	}
-	return a0.(*net.UDPAddr), args.Error(1)
-}
-
-type mockDialer struct {
-	mock.Mock
-}
-
-func (m *mockDialer) DialUDP(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error) {
-	args := m.Called(network, laddr, raddr)
-
-	a0 := args.Get(0)
-	if a0 == nil {
-		return (*net.UDPConn)(nil), args.Error(1)
-	}
-
-	return a0.(*net.UDPConn), args.Error(1)
-}
-
-func newUDPListener() (net.PacketConn, error) {
-	return net.ListenPacket("udp", "127.0.0.1:0")
-}
-
-func newUDPConn() (net.PacketConn, *net.UDPConn, error) {
-	mockServer, err := newUDPListener()
-	if err != nil {
-		return nil, nil, err
-	}
-
-	addr, err := net.ResolveUDPAddr("udp", mockServer.LocalAddr().String())
-	if err != nil {
-		// Best effort.
-		_ = mockServer.Close()
-		return nil, nil, err
-	}
-
-	conn, err := net.DialUDP("udp", nil, addr)
-	if err != nil {
-		// Best effort.
-		_ = mockServer.Close()
-		return nil, nil, err
-	}
-
-	return mockServer, conn, nil
-}
-
-func assertConnWritable(t *testing.T, conn udpConn, serverConn net.PacketConn) {
-	expectedString := "yo this is a test"
-	_, err := conn.Write([]byte(expectedString))
-	require.NoError(t, err)
-
-	buf := make([]byte, len(expectedString))
-	err = serverConn.SetReadDeadline(time.Now().Add(time.Second))
-	require.NoError(t, err)
-
-	_, _, err = serverConn.ReadFrom(buf)
-	require.NoError(t, err)
-	require.Equal(t, []byte(expectedString), buf)
-}
-
-func waitForCallWithTimeout(call *mock.Call) bool {
-	called := make(chan struct{})
-	call.Run(func(args mock.Arguments) {
-		if !isChannelClosed(called) {
-			close(called)
-		}
-	})
-
-	var wasCalled bool
-	// wait at most 100 milliseconds for the second call of ResolveUDPAddr that is supposed to fail
-	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*100)
-	select {
-	case <-called:
-		wasCalled = true
-	case <-ctx.Done():
-		fmt.Println("timed out")
-	}
-	cancel()
-
-	return wasCalled
-}
-
-func isChannelClosed(ch <-chan struct{}) bool {
-	select {
-	case <-ch:
-		return true
-	default:
-	}
-	return false
-}
-
-func waitForConnCondition(conn *reconnectingUDPConn, condition func(conn *reconnectingUDPConn) bool) bool {
-	var conditionVal bool
-	for i := 0; i < 10; i++ {
-		conn.connMtx.RLock()
-		conditionVal = condition(conn)
-		conn.connMtx.RUnlock()
-		if conditionVal || i >= 9 {
-			break
-		}
-
-		time.Sleep(time.Millisecond * 10)
-	}
-
-	return conditionVal
-}
-
-func newMockUDPAddr(t *testing.T, port int) *net.UDPAddr {
-	buf := make([]byte, 4)
-	// random is not seeded to ensure tests are deterministic (also does not matter if ip is valid)
-	_, err := rand.Read(buf)
-	require.NoError(t, err)
-
-	return &net.UDPAddr{
-		IP:   net.IPv4(buf[0], buf[1], buf[2], buf[3]),
-		Port: port,
-	}
-}
-
-func TestNewResolvedUDPConn(t *testing.T) {
-	hostPort := "blahblah:34322"
-
-	mockServer, clientConn, err := newUDPConn()
-	require.NoError(t, err)
-	defer mockServer.Close()
-
-	mockUDPAddr := newMockUDPAddr(t, 34322)
-
-	resolver := mockResolver{}
-	resolver.
-		On("ResolveUDPAddr", "udp", hostPort).
-		Return(mockUDPAddr, nil).
-		Once()
-
-	dialer := mockDialer{}
-	dialer.
-		On("DialUDP", "udp", (*net.UDPAddr)(nil), mockUDPAddr).
-		Return(clientConn, nil).
-		Once()
-
-	conn, err := newReconnectingUDPConn(hostPort, udpPacketMaxLength, time.Hour, resolver.ResolveUDPAddr, dialer.DialUDP, emptyLogger)
-	assert.NoError(t, err)
-	require.NotNil(t, conn)
-
-	err = conn.Close()
-	assert.NoError(t, err)
-
-	// assert the actual connection was closed
-	assert.Error(t, clientConn.Close())
-
-	resolver.AssertExpectations(t)
-	dialer.AssertExpectations(t)
-}
-
-func TestResolvedUDPConnWrites(t *testing.T) {
-	hostPort := "blahblah:34322"
-
-	mockServer, clientConn, err := newUDPConn()
-	require.NoError(t, err)
-	defer mockServer.Close()
-
-	mockUDPAddr := newMockUDPAddr(t, 34322)
-
-	resolver := mockResolver{}
-	resolver.
-		On("ResolveUDPAddr", "udp", hostPort).
-		Return(mockUDPAddr, nil).
-		Once()
-
-	dialer := mockDialer{}
-	dialer.
-		On("DialUDP", "udp", (*net.UDPAddr)(nil), mockUDPAddr).
-		Return(clientConn, nil).
-		Once()
-
-	conn, err := newReconnectingUDPConn(hostPort, udpPacketMaxLength, time.Hour, resolver.ResolveUDPAddr, dialer.DialUDP, emptyLogger)
-	assert.NoError(t, err)
-	require.NotNil(t, conn)
-
-	assertConnWritable(t, conn, mockServer)
-
-	err = conn.Close()
-	assert.NoError(t, err)
-
-	// assert the actual connection was closed
-	assert.Error(t, clientConn.Close())
-
-	resolver.AssertExpectations(t)
-	dialer.AssertExpectations(t)
-}
-
-func TestResolvedUDPConnEventuallyDials(t *testing.T) {
-	hostPort := "blahblah:34322"
-
-	mockServer, clientConn, err := newUDPConn()
-	require.NoError(t, err)
-	defer mockServer.Close()
-
-	mockUDPAddr := newMockUDPAddr(t, 34322)
-
-	resolver := mockResolver{}
-	resolver.
-		On("ResolveUDPAddr", "udp", hostPort).
-		Return(nil, fmt.Errorf("failed to resolve")).Once().
-		On("ResolveUDPAddr", "udp", hostPort).
-		Return(mockUDPAddr, nil)
-
-	dialer := mockDialer{}
-	dialCall := dialer.
-		On("DialUDP", "udp", (*net.UDPAddr)(nil), mockUDPAddr).
-		Return(clientConn, nil).Once()
-
-	conn, err := newReconnectingUDPConn(hostPort, udpPacketMaxLength, time.Millisecond*10, resolver.ResolveUDPAddr, dialer.DialUDP, emptyLogger)
-	assert.NoError(t, err)
-	require.NotNil(t, conn)
-
-	err = conn.SetWriteBuffer(udpPacketMaxLength)
-	assert.NoError(t, err)
-
-	wasCalled := waitForCallWithTimeout(dialCall)
-	assert.True(t, wasCalled)
-
-	connEstablished := waitForConnCondition(conn, func(conn *reconnectingUDPConn) bool {
-		return conn.conn != nil
-	})
-
-	assert.True(t, connEstablished)
-
-	assertConnWritable(t, conn, mockServer)
-	assertSockBufferSize(t, udpPacketMaxLength, clientConn)
-
-	err = conn.Close()
-	assert.NoError(t, err)
-
-	// assert the actual connection was closed
-	assert.Error(t, clientConn.Close())
-
-	resolver.AssertExpectations(t)
-	dialer.AssertExpectations(t)
-}
-
-func TestResolvedUDPConnNoSwapIfFail(t *testing.T) {
-	hostPort := "blahblah:34322"
-
-	mockServer, clientConn, err := newUDPConn()
-	require.NoError(t, err)
-	defer mockServer.Close()
-
-	mockUDPAddr := newMockUDPAddr(t, 34322)
-
-	resolver := mockResolver{}
-	resolver.
-		On("ResolveUDPAddr", "udp", hostPort).
-		Return(mockUDPAddr, nil).Once()
-
-	failCall := resolver.On("ResolveUDPAddr", "udp", hostPort).
-		Return(nil, fmt.Errorf("resolve failed"))
-
-	dialer := mockDialer{}
-	dialer.
-		On("DialUDP", "udp", (*net.UDPAddr)(nil), mockUDPAddr).
-		Return(clientConn, nil).Once()
-
-	conn, err := newReconnectingUDPConn(hostPort, udpPacketMaxLength, time.Millisecond*10, resolver.ResolveUDPAddr, dialer.DialUDP, emptyLogger)
-	assert.NoError(t, err)
-	require.NotNil(t, conn)
-
-	wasCalled := waitForCallWithTimeout(failCall)
-
-	assert.True(t, wasCalled)
-
-	assertConnWritable(t, conn, mockServer)
-
-	err = conn.Close()
-	assert.NoError(t, err)
-
-	// assert the actual connection was closed
-	assert.Error(t, clientConn.Close())
-
-	resolver.AssertExpectations(t)
-	dialer.AssertExpectations(t)
-}
-
-func TestResolvedUDPConnWriteRetry(t *testing.T) {
-	hostPort := "blahblah:34322"
-
-	mockServer, clientConn, err := newUDPConn()
-	require.NoError(t, err)
-	defer mockServer.Close()
-
-	mockUDPAddr := newMockUDPAddr(t, 34322)
-
-	resolver := mockResolver{}
-	resolver.
-		On("ResolveUDPAddr", "udp", hostPort).
-		Return(nil, fmt.Errorf("failed to resolve")).Once().
-		On("ResolveUDPAddr", "udp", hostPort).
-		Return(mockUDPAddr, nil).Once()
-
-	dialer := mockDialer{}
-	dialer.
-		On("DialUDP", "udp", (*net.UDPAddr)(nil), mockUDPAddr).
-		Return(clientConn, nil).Once()
-
-	conn, err := newReconnectingUDPConn(hostPort, udpPacketMaxLength, time.Millisecond*10, resolver.ResolveUDPAddr, dialer.DialUDP, emptyLogger)
-	assert.NoError(t, err)
-	require.NotNil(t, conn)
-
-	err = conn.SetWriteBuffer(udpPacketMaxLength)
-	assert.NoError(t, err)
-
-	assertConnWritable(t, conn, mockServer)
-	assertSockBufferSize(t, udpPacketMaxLength, clientConn)
-
-	err = conn.Close()
-	assert.NoError(t, err)
-
-	// assert the actual connection was closed
-	assert.Error(t, clientConn.Close())
-
-	resolver.AssertExpectations(t)
-	dialer.AssertExpectations(t)
-}
-
-func TestResolvedUDPConnWriteRetryFails(t *testing.T) {
-	hostPort := "blahblah:34322"
-
-	resolver := mockResolver{}
-	resolver.
-		On("ResolveUDPAddr", "udp", hostPort).
-		Return(nil, fmt.Errorf("failed to resolve")).Twice()
-
-	dialer := mockDialer{}
-
-	conn, err := newReconnectingUDPConn(hostPort, udpPacketMaxLength, time.Millisecond*10, resolver.ResolveUDPAddr, dialer.DialUDP, emptyLogger)
-	assert.NoError(t, err)
-	require.NotNil(t, conn)
-
-	err = conn.SetWriteBuffer(udpPacketMaxLength)
-	assert.NoError(t, err)
-
-	_, err = conn.Write([]byte("yo this is a test"))
-
-	assert.Error(t, err)
-
-	err = conn.Close()
-	assert.NoError(t, err)
-
-	resolver.AssertExpectations(t)
-	dialer.AssertExpectations(t)
-}
-
-func TestResolvedUDPConnChanges(t *testing.T) {
-	hostPort := "blahblah:34322"
-
-	mockServer, clientConn, err := newUDPConn()
-	require.NoError(t, err)
-	defer mockServer.Close()
-
-	mockUDPAddr1 := newMockUDPAddr(t, 34322)
-
-	mockServer2, clientConn2, err := newUDPConn()
-	require.NoError(t, err)
-	defer mockServer2.Close()
-
-	mockUDPAddr2 := newMockUDPAddr(t, 34322)
-
-	// ensure address doesn't duplicate mockUDPAddr1
-	for i := 0; i < 10 && mockUDPAddr2.IP.Equal(mockUDPAddr1.IP); i++ {
-		mockUDPAddr2 = newMockUDPAddr(t, 34322)
-	}
-
-	// this is really unlikely to ever fail the test, but its here as a safeguard
-	require.False(t, mockUDPAddr2.IP.Equal(mockUDPAddr1.IP))
-
-	resolver := mockResolver{}
-	resolver.
-		On("ResolveUDPAddr", "udp", hostPort).
-		Return(mockUDPAddr1, nil).Once().
-		On("ResolveUDPAddr", "udp", hostPort).
-		Return(mockUDPAddr2, nil)
-
-	dialer := mockDialer{}
-	dialer.
-		On("DialUDP", "udp", (*net.UDPAddr)(nil), mockUDPAddr1).
-		Return(clientConn, nil).Once()
-
-	secondDial := dialer.
-		On("DialUDP", "udp", (*net.UDPAddr)(nil), mockUDPAddr2).
-		Return(clientConn2, nil).Once()
-
-	conn, err := newReconnectingUDPConn(hostPort, udpPacketMaxLength, time.Millisecond*10, resolver.ResolveUDPAddr, dialer.DialUDP, emptyLogger)
-	assert.NoError(t, err)
-	require.NotNil(t, conn)
-
-	err = conn.SetWriteBuffer(udpPacketMaxLength)
-	assert.NoError(t, err)
-
-	wasCalled := waitForCallWithTimeout(secondDial)
-	assert.True(t, wasCalled)
-
-	connSwapped := waitForConnCondition(conn, func(conn *reconnectingUDPConn) bool {
-		return conn.conn == clientConn2
-	})
-
-	assert.True(t, connSwapped)
-
-	assertConnWritable(t, conn, mockServer2)
-	assertSockBufferSize(t, udpPacketMaxLength, clientConn2)
-
-	err = conn.Close()
-	assert.NoError(t, err)
-
-	// assert the prev connection was closed
-	assert.Error(t, clientConn.Close())
-
-	// assert the actual connection was closed
-	assert.Error(t, clientConn2.Close())
-
-	resolver.AssertExpectations(t)
-	dialer.AssertExpectations(t)
-}
-
-func TestResolvedUDPConnLoopWithoutChanges(t *testing.T) {
-	hostPort := "blahblah:34322"
-
-	mockServer, clientConn, err := newUDPConn()
-	require.NoError(t, err)
-	defer mockServer.Close()
-
-	mockUDPAddr := newMockUDPAddr(t, 34322)
-
-	resolver := mockResolver{}
-	resolver.
-		On("ResolveUDPAddr", "udp", hostPort).
-		Return(mockUDPAddr, nil)
-
-	dialer := mockDialer{}
-	dialer.
-		On("DialUDP", "udp", (*net.UDPAddr)(nil), mockUDPAddr).
-		Return(clientConn, nil).
-		Once()
-
-	resolveTimeout := 500 * time.Millisecond
-	conn, err := newReconnectingUDPConn(hostPort, udpPacketMaxLength, resolveTimeout, resolver.ResolveUDPAddr, dialer.DialUDP, emptyLogger)
-	assert.NoError(t, err)
-	require.NotNil(t, conn)
-	assert.Equal(t, mockUDPAddr, conn.destAddr)
-
-	// Waiting for one round of loop
-	time.Sleep(3 * resolveTimeout)
-	assert.Equal(t, mockUDPAddr, conn.destAddr)
-
-	err = conn.Close()
-	assert.NoError(t, err)
-
-	// assert the actual connection was closed
-	assert.Error(t, clientConn.Close())
-
-	resolver.AssertExpectations(t)
-	dialer.AssertExpectations(t)
-}
diff --git a/exporters/jaeger/uploader.go b/exporters/jaeger/uploader.go
deleted file mode 100644
index f65e3a6782a..00000000000
--- a/exporters/jaeger/uploader.go
+++ /dev/null
@@ -1,339 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jaeger // import "go.opentelemetry.io/otel/exporters/jaeger"
-
-import (
-	"bytes"
-	"context"
-	"fmt"
-	"io"
-	"log"
-	"net/http"
-	"time"
-
-	"github.com/go-logr/logr"
-	"github.com/go-logr/stdr"
-
-	gen "go.opentelemetry.io/otel/exporters/jaeger/internal/gen-go/jaeger"
-	"go.opentelemetry.io/otel/exporters/jaeger/internal/third_party/thrift/lib/go/thrift"
-)
-
-// batchUploader send a batch of spans to Jaeger.
-type batchUploader interface {
-	upload(context.Context, *gen.Batch) error
-	shutdown(context.Context) error
-}
-
-// EndpointOption configures a Jaeger endpoint.
-type EndpointOption interface {
-	newBatchUploader() (batchUploader, error)
-}
-
-type endpointOptionFunc func() (batchUploader, error)
-
-func (fn endpointOptionFunc) newBatchUploader() (batchUploader, error) {
-	return fn()
-}
-
-// WithAgentEndpoint configures the Jaeger exporter to send spans to a Jaeger agent
-// over compact thrift protocol. This will use the following environment variables for
-// configuration if no explicit option is provided:
-//
-// - OTEL_EXPORTER_JAEGER_AGENT_HOST is used for the agent address host
-// - OTEL_EXPORTER_JAEGER_AGENT_PORT is used for the agent address port
-//
-// The passed options will take precedence over any environment variables and default values
-// will be used if neither are provided.
-func WithAgentEndpoint(options ...AgentEndpointOption) EndpointOption {
-	return endpointOptionFunc(func() (batchUploader, error) {
-		cfg := agentEndpointConfig{
-			agentClientUDPParams{
-				AttemptReconnecting: true,
-				Host:                envOr(envAgentHost, "localhost"),
-				Port:                envOr(envAgentPort, "6831"),
-			},
-		}
-		for _, opt := range options {
-			cfg = opt.apply(cfg)
-		}
-
-		client, err := newAgentClientUDP(cfg.agentClientUDPParams)
-		if err != nil {
-			return nil, err
-		}
-
-		return &agentUploader{client: client}, nil
-	})
-}
-
-// AgentEndpointOption configures a Jaeger agent endpoint.
-type AgentEndpointOption interface {
-	apply(agentEndpointConfig) agentEndpointConfig
-}
-
-type agentEndpointConfig struct {
-	agentClientUDPParams
-}
-
-type agentEndpointOptionFunc func(agentEndpointConfig) agentEndpointConfig
-
-func (fn agentEndpointOptionFunc) apply(cfg agentEndpointConfig) agentEndpointConfig {
-	return fn(cfg)
-}
-
-// WithAgentHost sets a host to be used in the agent client endpoint.
-// This option overrides any value set for the
-// OTEL_EXPORTER_JAEGER_AGENT_HOST environment variable.
-// If this option is not passed and the env var is not set, "localhost" will be used by default.
-func WithAgentHost(host string) AgentEndpointOption {
-	return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
-		o.Host = host
-		return o
-	})
-}
-
-// WithAgentPort sets a port to be used in the agent client endpoint.
-// This option overrides any value set for the
-// OTEL_EXPORTER_JAEGER_AGENT_PORT environment variable.
-// If this option is not passed and the env var is not set, "6831" will be used by default.
-func WithAgentPort(port string) AgentEndpointOption {
-	return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
-		o.Port = port
-		return o
-	})
-}
-
-var emptyLogger = logr.Logger{}
-
-// WithLogger sets a logger to be used by agent client.
-// WithLogger and WithLogr will overwrite each other.
-func WithLogger(logger *log.Logger) AgentEndpointOption {
-	return WithLogr(stdr.New(logger))
-}
-
-// WithLogr sets a logr.Logger to be used by agent client.
-// WithLogr and WithLogger will overwrite each other.
-func WithLogr(logger logr.Logger) AgentEndpointOption {
-	return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
-		o.Logger = logger
-		return o
-	})
-}
-
-// WithDisableAttemptReconnecting sets option to disable reconnecting udp client.
-func WithDisableAttemptReconnecting() AgentEndpointOption {
-	return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
-		o.AttemptReconnecting = false
-		return o
-	})
-}
-
-// WithAttemptReconnectingInterval sets the interval between attempts to re resolve agent endpoint.
-func WithAttemptReconnectingInterval(interval time.Duration) AgentEndpointOption {
-	return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
-		o.AttemptReconnectInterval = interval
-		return o
-	})
-}
-
-// WithMaxPacketSize sets the maximum UDP packet size for transport to the Jaeger agent.
-func WithMaxPacketSize(size int) AgentEndpointOption {
-	return agentEndpointOptionFunc(func(o agentEndpointConfig) agentEndpointConfig {
-		o.MaxPacketSize = size
-		return o
-	})
-}
-
-// WithCollectorEndpoint defines the full URL to the Jaeger HTTP Thrift collector. This will
-// use the following environment variables for configuration if no explicit option is provided:
-//
-// - OTEL_EXPORTER_JAEGER_ENDPOINT is the HTTP endpoint for sending spans directly to a collector.
-// - OTEL_EXPORTER_JAEGER_USER is the username to be sent as authentication to the collector endpoint.
-// - OTEL_EXPORTER_JAEGER_PASSWORD is the password to be sent as authentication to the collector endpoint.
-//
-// The passed options will take precedence over any environment variables.
-// If neither values are provided for the endpoint, the default value of "http://localhost:14268/api/traces" will be used.
-// If neither values are provided for the username or the password, they will not be set since there is no default.
-func WithCollectorEndpoint(options ...CollectorEndpointOption) EndpointOption {
-	return endpointOptionFunc(func() (batchUploader, error) {
-		cfg := collectorEndpointConfig{
-			endpoint:   envOr(envEndpoint, "http://localhost:14268/api/traces"),
-			username:   envOr(envUser, ""),
-			password:   envOr(envPassword, ""),
-			httpClient: http.DefaultClient,
-		}
-
-		for _, opt := range options {
-			cfg = opt.apply(cfg)
-		}
-
-		return &collectorUploader{
-			endpoint:   cfg.endpoint,
-			username:   cfg.username,
-			password:   cfg.password,
-			httpClient: cfg.httpClient,
-		}, nil
-	})
-}
-
-// CollectorEndpointOption configures a Jaeger collector endpoint.
-type CollectorEndpointOption interface {
-	apply(collectorEndpointConfig) collectorEndpointConfig
-}
-
-type collectorEndpointConfig struct {
-	// endpoint for sending spans directly to a collector.
-	endpoint string
-
-	// username to be used for authentication with the collector endpoint.
-	username string
-
-	// password to be used for authentication with the collector endpoint.
-	password string
-
-	// httpClient to be used to make requests to the collector endpoint.
-	httpClient *http.Client
-}
-
-type collectorEndpointOptionFunc func(collectorEndpointConfig) collectorEndpointConfig
-
-func (fn collectorEndpointOptionFunc) apply(cfg collectorEndpointConfig) collectorEndpointConfig {
-	return fn(cfg)
-}
-
-// WithEndpoint is the URL for the Jaeger collector that spans are sent to.
-// This option overrides any value set for the
-// OTEL_EXPORTER_JAEGER_ENDPOINT environment variable.
-// If this option is not passed and the environment variable is not set,
-// "http://localhost:14268/api/traces" will be used by default.
-func WithEndpoint(endpoint string) CollectorEndpointOption {
-	return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig {
-		o.endpoint = endpoint
-		return o
-	})
-}
-
-// WithUsername sets the username to be used in the authorization header sent for all requests to the collector.
-// This option overrides any value set for the
-// OTEL_EXPORTER_JAEGER_USER environment variable.
-// If this option is not passed and the environment variable is not set, no username will be set.
-func WithUsername(username string) CollectorEndpointOption {
-	return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig {
-		o.username = username
-		return o
-	})
-}
-
-// WithPassword sets the password to be used in the authorization header sent for all requests to the collector.
-// This option overrides any value set for the
-// OTEL_EXPORTER_JAEGER_PASSWORD environment variable.
-// If this option is not passed and the environment variable is not set, no password will be set.
-func WithPassword(password string) CollectorEndpointOption {
-	return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig {
-		o.password = password
-		return o
-	})
-}
-
-// WithHTTPClient sets the http client to be used to make request to the collector endpoint.
-func WithHTTPClient(client *http.Client) CollectorEndpointOption {
-	return collectorEndpointOptionFunc(func(o collectorEndpointConfig) collectorEndpointConfig {
-		o.httpClient = client
-		return o
-	})
-}
-
-// agentUploader implements batchUploader interface sending batches to
-// Jaeger through the UDP agent.
-type agentUploader struct {
-	client *agentClientUDP
-}
-
-var _ batchUploader = (*agentUploader)(nil)
-
-func (a *agentUploader) shutdown(ctx context.Context) error {
-	done := make(chan error, 1)
-	go func() {
-		done <- a.client.Close()
-	}()
-
-	select {
-	case <-ctx.Done():
-		// Prioritize not blocking the calling thread and just leak the
-		// spawned goroutine to close the client.
-		return ctx.Err()
-	case err := <-done:
-		return err
-	}
-}
-
-func (a *agentUploader) upload(ctx context.Context, batch *gen.Batch) error {
-	return a.client.EmitBatch(ctx, batch)
-}
-
-// collectorUploader implements batchUploader interface sending batches to
-// Jaeger through the collector http endpoint.
-type collectorUploader struct {
-	endpoint   string
-	username   string
-	password   string
-	httpClient *http.Client
-}
-
-var _ batchUploader = (*collectorUploader)(nil)
-
-func (c *collectorUploader) shutdown(ctx context.Context) error {
-	// The Exporter will cancel any active exports and will prevent all
-	// subsequent exports, so nothing to do here.
-	return nil
-}
-
-func (c *collectorUploader) upload(ctx context.Context, batch *gen.Batch) error {
-	body, err := serialize(batch)
-	if err != nil {
-		return err
-	}
-	req, err := http.NewRequestWithContext(ctx, "POST", c.endpoint, body)
-	if err != nil {
-		return err
-	}
-	if c.username != "" && c.password != "" {
-		req.SetBasicAuth(c.username, c.password)
-	}
-	req.Header.Set("Content-Type", "application/x-thrift")
-
-	resp, err := c.httpClient.Do(req)
-	if err != nil {
-		return err
-	}
-
-	_, _ = io.Copy(io.Discard, resp.Body)
-	if err = resp.Body.Close(); err != nil {
-		return err
-	}
-
-	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
-		return fmt.Errorf("failed to upload traces; HTTP status code: %d", resp.StatusCode)
-	}
-	return nil
-}
-
-func serialize(obj thrift.TStruct) (*bytes.Buffer, error) {
-	buf := thrift.NewTMemoryBuffer()
-	if err := obj.Write(context.Background(), thrift.NewTBinaryProtocolConf(buf, &thrift.TConfiguration{})); err != nil {
-		return nil, err
-	}
-	return buf.Buffer, nil
-}
diff --git a/exporters/otlp/internal/config.go b/exporters/otlp/internal/config.go
deleted file mode 100644
index d0998fb1d04..00000000000
--- a/exporters/otlp/internal/config.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package internal contains common functionality for all OTLP exporters.
-//
-// Deprecated: package internal exists for historical compatibility, it should
-// not be used.
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
-
-import (
-	"fmt"
-	"path"
-	"strings"
-)
-
-// CleanPath returns a path with all spaces trimmed and all redundancies removed. If urlPath is empty or cleaning it results in an empty string, defaultPath is returned instead.
-func CleanPath(urlPath string, defaultPath string) string {
-	tmp := path.Clean(strings.TrimSpace(urlPath))
-	if tmp == "." {
-		return defaultPath
-	}
-	if !path.IsAbs(tmp) {
-		tmp = fmt.Sprintf("/%s", tmp)
-	}
-	return tmp
-}
diff --git a/exporters/otlp/internal/config_test.go b/exporters/otlp/internal/config_test.go
deleted file mode 100644
index 92a819a1f2b..00000000000
--- a/exporters/otlp/internal/config_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import "testing"
-
-func TestCleanPath(t *testing.T) {
-	type args struct {
-		urlPath     string
-		defaultPath string
-	}
-	tests := []struct {
-		name string
-		args args
-		want string
-	}{
-		{
-			name: "clean empty path",
-			args: args{
-				urlPath:     "",
-				defaultPath: "DefaultPath",
-			},
-			want: "DefaultPath",
-		},
-		{
-			name: "clean metrics path",
-			args: args{
-				urlPath:     "/prefix/v1/metrics",
-				defaultPath: "DefaultMetricsPath",
-			},
-			want: "/prefix/v1/metrics",
-		},
-		{
-			name: "clean traces path",
-			args: args{
-				urlPath:     "https://env_endpoint",
-				defaultPath: "DefaultTracesPath",
-			},
-			want: "/https:/env_endpoint",
-		},
-		{
-			name: "spaces trimmed",
-			args: args{
-				urlPath: " /dir",
-			},
-			want: "/dir",
-		},
-		{
-			name: "clean path empty",
-			args: args{
-				urlPath:     "dir/..",
-				defaultPath: "DefaultTracesPath",
-			},
-			want: "DefaultTracesPath",
-		},
-		{
-			name: "make absolute",
-			args: args{
-				urlPath: "dir/a",
-			},
-			want: "/dir/a",
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			if got := CleanPath(tt.args.urlPath, tt.args.defaultPath); got != tt.want {
-				t.Errorf("CleanPath() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-}
diff --git a/exporters/otlp/internal/envconfig/envconfig.go b/exporters/otlp/internal/envconfig/envconfig.go
deleted file mode 100644
index 444eefbb388..00000000000
--- a/exporters/otlp/internal/envconfig/envconfig.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"errors"
-	"fmt"
-	"net/url"
-	"strconv"
-	"strings"
-	"time"
-
-	"go.opentelemetry.io/otel/internal/global"
-)
-
-// ConfigFn is the generic function used to set a config.
-type ConfigFn func(*EnvOptionsReader)
-
-// EnvOptionsReader reads the required environment variables.
-type EnvOptionsReader struct {
-	GetEnv    func(string) string
-	ReadFile  func(string) ([]byte, error)
-	Namespace string
-}
-
-// Apply runs every ConfigFn.
-func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
-	for _, o := range opts {
-		o(e)
-	}
-}
-
-// GetEnvValue gets an OTLP environment variable value of the specified key
-// using the GetEnv function.
-// This function prepends the OTLP specified namespace to all key lookups.
-func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
-	v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
-	return v, v != ""
-}
-
-// WithString retrieves the specified config and passes it to ConfigFn as a string.
-func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
-	return func(e *EnvOptionsReader) {
-		if v, ok := e.GetEnvValue(n); ok {
-			fn(v)
-		}
-	}
-}
-
-// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
-func WithBool(n string, fn func(bool)) ConfigFn {
-	return func(e *EnvOptionsReader) {
-		if v, ok := e.GetEnvValue(n); ok {
-			b := strings.ToLower(v) == "true"
-			fn(b)
-		}
-	}
-}
-
-// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
-func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
-	return func(e *EnvOptionsReader) {
-		if v, ok := e.GetEnvValue(n); ok {
-			d, err := strconv.Atoi(v)
-			if err != nil {
-				global.Error(err, "parse duration", "input", v)
-				return
-			}
-			fn(time.Duration(d) * time.Millisecond)
-		}
-	}
-}
-
-// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
-func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
-	return func(e *EnvOptionsReader) {
-		if v, ok := e.GetEnvValue(n); ok {
-			fn(stringToHeader(v))
-		}
-	}
-}
-
-// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
-func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
-	return func(e *EnvOptionsReader) {
-		if v, ok := e.GetEnvValue(n); ok {
-			u, err := url.Parse(v)
-			if err != nil {
-				global.Error(err, "parse url", "input", v)
-				return
-			}
-			fn(u)
-		}
-	}
-}
-
-// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
-func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
-	return func(e *EnvOptionsReader) {
-		if v, ok := e.GetEnvValue(n); ok {
-			b, err := e.ReadFile(v)
-			if err != nil {
-				global.Error(err, "read tls ca cert file", "file", v)
-				return
-			}
-			c, err := createCertPool(b)
-			if err != nil {
-				global.Error(err, "create tls cert pool")
-				return
-			}
-			fn(c)
-		}
-	}
-}
-
-// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn.
-func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
-	return func(e *EnvOptionsReader) {
-		vc, okc := e.GetEnvValue(nc)
-		vk, okk := e.GetEnvValue(nk)
-		if !okc || !okk {
-			return
-		}
-		cert, err := e.ReadFile(vc)
-		if err != nil {
-			global.Error(err, "read tls client cert", "file", vc)
-			return
-		}
-		key, err := e.ReadFile(vk)
-		if err != nil {
-			global.Error(err, "read tls client key", "file", vk)
-			return
-		}
-		crt, err := tls.X509KeyPair(cert, key)
-		if err != nil {
-			global.Error(err, "create tls client key pair")
-			return
-		}
-		fn(crt)
-	}
-}
-
-func keyWithNamespace(ns, key string) string {
-	if ns == "" {
-		return key
-	}
-	return fmt.Sprintf("%s_%s", ns, key)
-}
-
-func stringToHeader(value string) map[string]string {
-	headersPairs := strings.Split(value, ",")
-	headers := make(map[string]string)
-
-	for _, header := range headersPairs {
-		n, v, found := strings.Cut(header, "=")
-		if !found {
-			global.Error(errors.New("missing '="), "parse headers", "input", header)
-			continue
-		}
-		name, err := url.QueryUnescape(n)
-		if err != nil {
-			global.Error(err, "escape header key", "key", n)
-			continue
-		}
-		trimmedName := strings.TrimSpace(name)
-		value, err := url.QueryUnescape(v)
-		if err != nil {
-			global.Error(err, "escape header value", "value", v)
-			continue
-		}
-		trimmedValue := strings.TrimSpace(value)
-
-		headers[trimmedName] = trimmedValue
-	}
-
-	return headers
-}
-
-func createCertPool(certBytes []byte) (*x509.CertPool, error) {
-	cp := x509.NewCertPool()
-	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
-		return nil, errors.New("failed to append certificate to the cert pool")
-	}
-	return cp, nil
-}
diff --git a/exporters/otlp/internal/envconfig/envconfig_test.go b/exporters/otlp/internal/envconfig/envconfig_test.go
deleted file mode 100644
index eb2ab8a6806..00000000000
--- a/exporters/otlp/internal/envconfig/envconfig_test.go
+++ /dev/null
@@ -1,461 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"errors"
-	"net/url"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-)
-
-const WeakKey = `
------BEGIN EC PRIVATE KEY-----
-MHcCAQEEIEbrSPmnlSOXvVzxCyv+VR3a0HDeUTvOcqrdssZ2k4gFoAoGCCqGSM49
-AwEHoUQDQgAEDMTfv75J315C3K9faptS9iythKOMEeV/Eep73nWX531YAkmmwBSB
-2dXRD/brsgLnfG57WEpxZuY7dPRbxu33BA==
------END EC PRIVATE KEY-----
-`
-
-const WeakCertificate = `
------BEGIN CERTIFICATE-----
-MIIBjjCCATWgAwIBAgIUKQSMC66MUw+kPp954ZYOcyKAQDswCgYIKoZIzj0EAwIw
-EjEQMA4GA1UECgwHb3RlbC1nbzAeFw0yMjEwMTkwMDA5MTlaFw0yMzEwMTkwMDA5
-MTlaMBIxEDAOBgNVBAoMB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
-AAQMxN+/vknfXkLcr19qm1L2LK2Eo4wR5X8R6nvedZfnfVgCSabAFIHZ1dEP9uuy
-Aud8bntYSnFm5jt09FvG7fcEo2kwZzAdBgNVHQ4EFgQUicGuhnTTkYLZwofXMNLK
-SHFeCWgwHwYDVR0jBBgwFoAUicGuhnTTkYLZwofXMNLKSHFeCWgwDwYDVR0TAQH/
-BAUwAwEB/zAUBgNVHREEDTALgglsb2NhbGhvc3QwCgYIKoZIzj0EAwIDRwAwRAIg
-Lfma8FnnxeSOi6223AsFfYwsNZ2RderNsQrS0PjEHb0CIBkrWacqARUAu7uT4cGu
-jVcIxYQqhId5L8p/mAv2PWZS
------END CERTIFICATE-----
-`
-
-type testOption struct {
-	TestString   string
-	TestBool     bool
-	TestDuration time.Duration
-	TestHeaders  map[string]string
-	TestURL      *url.URL
-	TestTLS      *tls.Config
-}
-
-func TestEnvConfig(t *testing.T) {
-	parsedURL, err := url.Parse("https://example.com")
-	assert.NoError(t, err)
-
-	options := []testOption{}
-	for _, testcase := range []struct {
-		name            string
-		reader          EnvOptionsReader
-		configs         []ConfigFn
-		expectedOptions []testOption
-	}{
-		{
-			name: "with no namespace and a matching key",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "world"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithString("HELLO", func(v string) {
-					options = append(options, testOption{TestString: v})
-				}),
-			},
-			expectedOptions: []testOption{
-				{
-					TestString: "world",
-				},
-			},
-		},
-		{
-			name: "with no namespace and a non-matching key",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "world"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithString("HOLA", func(v string) {
-					options = append(options, testOption{TestString: v})
-				}),
-			},
-			expectedOptions: []testOption{},
-		},
-		{
-			name: "with a namespace and a matching key",
-			reader: EnvOptionsReader{
-				Namespace: "MY_NAMESPACE",
-				GetEnv: func(n string) string {
-					if n == "MY_NAMESPACE_HELLO" {
-						return "world"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithString("HELLO", func(v string) {
-					options = append(options, testOption{TestString: v})
-				}),
-			},
-			expectedOptions: []testOption{
-				{
-					TestString: "world",
-				},
-			},
-		},
-		{
-			name: "with no namespace and a non-matching key",
-			reader: EnvOptionsReader{
-				Namespace: "MY_NAMESPACE",
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "world"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithString("HELLO", func(v string) {
-					options = append(options, testOption{TestString: v})
-				}),
-			},
-			expectedOptions: []testOption{},
-		},
-		{
-			name: "with a bool config",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "true"
-					} else if n == "WORLD" {
-						return "false"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithBool("HELLO", func(b bool) {
-					options = append(options, testOption{TestBool: b})
-				}),
-				WithBool("WORLD", func(b bool) {
-					options = append(options, testOption{TestBool: b})
-				}),
-			},
-			expectedOptions: []testOption{
-				{
-					TestBool: true,
-				},
-				{
-					TestBool: false,
-				},
-			},
-		},
-		{
-			name: "with an invalid bool config",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "world"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithBool("HELLO", func(b bool) {
-					options = append(options, testOption{TestBool: b})
-				}),
-			},
-			expectedOptions: []testOption{
-				{
-					TestBool: false,
-				},
-			},
-		},
-		{
-			name: "with a duration config",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "60"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithDuration("HELLO", func(v time.Duration) {
-					options = append(options, testOption{TestDuration: v})
-				}),
-			},
-			expectedOptions: []testOption{
-				{
-					TestDuration: 60_000_000, // 60 milliseconds
-				},
-			},
-		},
-		{
-			name: "with an invalid duration config",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "world"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithDuration("HELLO", func(v time.Duration) {
-					options = append(options, testOption{TestDuration: v})
-				}),
-			},
-			expectedOptions: []testOption{},
-		},
-		{
-			name: "with headers",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "userId=42,userName=alice"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithHeaders("HELLO", func(v map[string]string) {
-					options = append(options, testOption{TestHeaders: v})
-				}),
-			},
-			expectedOptions: []testOption{
-				{
-					TestHeaders: map[string]string{
-						"userId":   "42",
-						"userName": "alice",
-					},
-				},
-			},
-		},
-		{
-			name: "with invalid headers",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "world"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithHeaders("HELLO", func(v map[string]string) {
-					options = append(options, testOption{TestHeaders: v})
-				}),
-			},
-			expectedOptions: []testOption{
-				{
-					TestHeaders: map[string]string{},
-				},
-			},
-		},
-		{
-			name: "with URL",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "https://example.com"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithURL("HELLO", func(v *url.URL) {
-					options = append(options, testOption{TestURL: v})
-				}),
-			},
-			expectedOptions: []testOption{
-				{
-					TestURL: parsedURL,
-				},
-			},
-		},
-		{
-			name: "with invalid URL",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "i nvalid://url"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithURL("HELLO", func(v *url.URL) {
-					options = append(options, testOption{TestURL: v})
-				}),
-			},
-			expectedOptions: []testOption{},
-		},
-	} {
-		t.Run(testcase.name, func(t *testing.T) {
-			testcase.reader.Apply(testcase.configs...)
-			assert.Equal(t, testcase.expectedOptions, options)
-			options = []testOption{}
-		})
-	}
-}
-
-func TestWithTLSConfig(t *testing.T) {
-	pool, err := createCertPool([]byte(WeakCertificate))
-	assert.NoError(t, err)
-
-	reader := EnvOptionsReader{
-		GetEnv: func(n string) string {
-			if n == "CERTIFICATE" {
-				return "/path/cert.pem"
-			}
-			return ""
-		},
-		ReadFile: func(p string) ([]byte, error) {
-			if p == "/path/cert.pem" {
-				return []byte(WeakCertificate), nil
-			}
-			return []byte{}, nil
-		},
-	}
-
-	var option testOption
-	reader.Apply(
-		WithCertPool("CERTIFICATE", func(cp *x509.CertPool) {
-			option = testOption{TestTLS: &tls.Config{RootCAs: cp}}
-		}),
-	)
-
-	// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
-	assert.Equal(t, pool.Subjects(), option.TestTLS.RootCAs.Subjects())
-}
-
-func TestWithClientCert(t *testing.T) {
-	cert, err := tls.X509KeyPair([]byte(WeakCertificate), []byte(WeakKey))
-	assert.NoError(t, err)
-
-	reader := EnvOptionsReader{
-		GetEnv: func(n string) string {
-			switch n {
-			case "CLIENT_CERTIFICATE":
-				return "/path/tls.crt"
-			case "CLIENT_KEY":
-				return "/path/tls.key"
-			}
-			return ""
-		},
-		ReadFile: func(n string) ([]byte, error) {
-			switch n {
-			case "/path/tls.crt":
-				return []byte(WeakCertificate), nil
-			case "/path/tls.key":
-				return []byte(WeakKey), nil
-			}
-			return []byte{}, nil
-		},
-	}
-
-	var option testOption
-	reader.Apply(
-		WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) {
-			option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}}
-		}),
-	)
-	assert.Equal(t, cert, option.TestTLS.Certificates[0])
-
-	reader.ReadFile = func(s string) ([]byte, error) { return nil, errors.New("oops") }
-	option.TestTLS = nil
-	reader.Apply(
-		WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) {
-			option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}}
-		}),
-	)
-	assert.Nil(t, option.TestTLS)
-
-	reader.GetEnv = func(s string) string { return "" }
-	option.TestTLS = nil
-	reader.Apply(
-		WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) {
-			option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}}
-		}),
-	)
-	assert.Nil(t, option.TestTLS)
-}
-
-func TestStringToHeader(t *testing.T) {
-	tests := []struct {
-		name  string
-		value string
-		want  map[string]string
-	}{
-		{
-			name:  "simple test",
-			value: "userId=alice",
-			want:  map[string]string{"userId": "alice"},
-		},
-		{
-			name:  "simple test with spaces",
-			value: " userId = alice  ",
-			want:  map[string]string{"userId": "alice"},
-		},
-		{
-			name:  "multiples headers encoded",
-			value: "userId=alice,serverNode=DF%3A28,isProduction=false",
-			want: map[string]string{
-				"userId":       "alice",
-				"serverNode":   "DF:28",
-				"isProduction": "false",
-			},
-		},
-		{
-			name:  "invalid headers format",
-			value: "userId:alice",
-			want:  map[string]string{},
-		},
-		{
-			name:  "invalid key",
-			value: "%XX=missing,userId=alice",
-			want: map[string]string{
-				"userId": "alice",
-			},
-		},
-		{
-			name:  "invalid value",
-			value: "missing=%XX,userId=alice",
-			want: map[string]string{
-				"userId": "alice",
-			},
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			assert.Equal(t, tt.want, stringToHeader(tt.value))
-		})
-	}
-}
diff --git a/exporters/otlp/internal/partialsuccess.go b/exporters/otlp/internal/partialsuccess.go
deleted file mode 100644
index 9ab89b37574..00000000000
--- a/exporters/otlp/internal/partialsuccess.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
-
-import "fmt"
-
-// PartialSuccess represents the underlying error for all handling
-// OTLP partial success messages.  Use `errors.Is(err,
-// PartialSuccess{})` to test whether an error passed to the OTel
-// error handler belongs to this category.
-type PartialSuccess struct {
-	ErrorMessage  string
-	RejectedItems int64
-	RejectedKind  string
-}
-
-var _ error = PartialSuccess{}
-
-// Error implements the error interface.
-func (ps PartialSuccess) Error() string {
-	msg := ps.ErrorMessage
-	if msg == "" {
-		msg = "empty message"
-	}
-	return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
-}
-
-// Is supports the errors.Is() interface.
-func (ps PartialSuccess) Is(err error) bool {
-	_, ok := err.(PartialSuccess)
-	return ok
-}
-
-// TracePartialSuccessError returns an error describing a partial success
-// response for the trace signal.
-func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
-	return PartialSuccess{
-		ErrorMessage:  errorMessage,
-		RejectedItems: itemsRejected,
-		RejectedKind:  "spans",
-	}
-}
-
-// MetricPartialSuccessError returns an error describing a partial success
-// response for the metric signal.
-func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
-	return PartialSuccess{
-		ErrorMessage:  errorMessage,
-		RejectedItems: itemsRejected,
-		RejectedKind:  "metric data points",
-	}
-}
diff --git a/exporters/otlp/internal/partialsuccess_test.go b/exporters/otlp/internal/partialsuccess_test.go
deleted file mode 100644
index 9032f244cc4..00000000000
--- a/exporters/otlp/internal/partialsuccess_test.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
-
-import (
-	"errors"
-	"strings"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-)
-
-func requireErrorString(t *testing.T, expect string, err error) {
-	t.Helper()
-	require.NotNil(t, err)
-	require.Error(t, err)
-	require.True(t, errors.Is(err, PartialSuccess{}))
-
-	const pfx = "OTLP partial success: "
-
-	msg := err.Error()
-	require.True(t, strings.HasPrefix(msg, pfx))
-	require.Equal(t, expect, msg[len(pfx):])
-}
-
-func TestPartialSuccessFormat(t *testing.T) {
-	requireErrorString(t, "empty message (0 metric data points rejected)", MetricPartialSuccessError(0, ""))
-	requireErrorString(t, "help help (0 metric data points rejected)", MetricPartialSuccessError(0, "help help"))
-	requireErrorString(t, "what happened (10 metric data points rejected)", MetricPartialSuccessError(10, "what happened"))
-	requireErrorString(t, "what happened (15 spans rejected)", TracePartialSuccessError(15, "what happened"))
-}
diff --git a/exporters/otlp/internal/retry/go.mod b/exporters/otlp/internal/retry/go.mod
deleted file mode 100644
index 1f9fec184df..00000000000
--- a/exporters/otlp/internal/retry/go.mod
+++ /dev/null
@@ -1,16 +0,0 @@
-// Deprecated: package retry exists for historical compatibility, it should not
-// be used.
-module go.opentelemetry.io/otel/exporters/otlp/internal/retry
-
-go 1.19
-
-require (
-	github.com/cenkalti/backoff/v4 v4.2.1
-	github.com/stretchr/testify v1.8.4
-)
-
-require (
-	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
-)
diff --git a/exporters/otlp/internal/retry/go.sum b/exporters/otlp/internal/retry/go.sum
deleted file mode 100644
index 987eed906b6..00000000000
--- a/exporters/otlp/internal/retry/go.sum
+++ /dev/null
@@ -1,12 +0,0 @@
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
-gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/exporters/otlp/internal/retry/retry.go b/exporters/otlp/internal/retry/retry.go
deleted file mode 100644
index 94c4af0e562..00000000000
--- a/exporters/otlp/internal/retry/retry.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package retry provides request retry functionality that can perform
-// configurable exponential backoff for transient errors and honor any
-// explicit throttle responses received.
-//
-// Deprecated: package retry exists for historical compatibility, it should not
-// be used.
-package retry // import "go.opentelemetry.io/otel/exporters/otlp/internal/retry"
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	"github.com/cenkalti/backoff/v4"
-)
-
-// DefaultConfig are the recommended defaults to use.
-var DefaultConfig = Config{
-	Enabled:         true,
-	InitialInterval: 5 * time.Second,
-	MaxInterval:     30 * time.Second,
-	MaxElapsedTime:  time.Minute,
-}
-
-// Config defines configuration for retrying batches in case of export failure
-// using an exponential backoff.
-type Config struct {
-	// Enabled indicates whether to not retry sending batches in case of
-	// export failure.
-	Enabled bool
-	// InitialInterval the time to wait after the first failure before
-	// retrying.
-	InitialInterval time.Duration
-	// MaxInterval is the upper bound on backoff interval. Once this value is
-	// reached the delay between consecutive retries will always be
-	// `MaxInterval`.
-	MaxInterval time.Duration
-	// MaxElapsedTime is the maximum amount of time (including retries) spent
-	// trying to send a request/batch.  Once this value is reached, the data
-	// is discarded.
-	MaxElapsedTime time.Duration
-}
-
-// RequestFunc wraps a request with retry logic.
-type RequestFunc func(context.Context, func(context.Context) error) error
-
-// EvaluateFunc returns if an error is retry-able and if an explicit throttle
-// duration should be honored that was included in the error.
-//
-// The function must return true if the error argument is retry-able,
-// otherwise it must return false for the first return parameter.
-//
-// The function must return a non-zero time.Duration if the error contains
-// explicit throttle duration that should be honored, otherwise it must return
-// a zero valued time.Duration.
-type EvaluateFunc func(error) (bool, time.Duration)
-
-// RequestFunc returns a RequestFunc using the evaluate function to determine
-// if requests can be retried and based on the exponential backoff
-// configuration of c.
-func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
-	if !c.Enabled {
-		return func(ctx context.Context, fn func(context.Context) error) error {
-			return fn(ctx)
-		}
-	}
-
-	return func(ctx context.Context, fn func(context.Context) error) error {
-		// Do not use NewExponentialBackOff since it calls Reset and the code here
-		// must call Reset after changing the InitialInterval (this saves an
-		// unnecessary call to Now).
-		b := &backoff.ExponentialBackOff{
-			InitialInterval:     c.InitialInterval,
-			RandomizationFactor: backoff.DefaultRandomizationFactor,
-			Multiplier:          backoff.DefaultMultiplier,
-			MaxInterval:         c.MaxInterval,
-			MaxElapsedTime:      c.MaxElapsedTime,
-			Stop:                backoff.Stop,
-			Clock:               backoff.SystemClock,
-		}
-		b.Reset()
-
-		for {
-			err := fn(ctx)
-			if err == nil {
-				return nil
-			}
-
-			retryable, throttle := evaluate(err)
-			if !retryable {
-				return err
-			}
-
-			bOff := b.NextBackOff()
-			if bOff == backoff.Stop {
-				return fmt.Errorf("max retry time elapsed: %w", err)
-			}
-
-			// Wait for the greater of the backoff or throttle delay.
-			var delay time.Duration
-			if bOff > throttle {
-				delay = bOff
-			} else {
-				elapsed := b.GetElapsedTime()
-				if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
-					return fmt.Errorf("max retry time would elapse: %w", err)
-				}
-				delay = throttle
-			}
-
-			if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
-				return fmt.Errorf("%w: %s", ctxErr, err)
-			}
-		}
-	}
-}
-
-// Allow override for testing.
-var waitFunc = wait
-
-// wait takes the caller's context, and the amount of time to wait.  It will
-// return nil if the timer fires before or at the same time as the context's
-// deadline.  This indicates that the call can be retried.
-func wait(ctx context.Context, delay time.Duration) error {
-	timer := time.NewTimer(delay)
-	defer timer.Stop()
-
-	select {
-	case <-ctx.Done():
-		// Handle the case where the timer and context deadline end
-		// simultaneously by prioritizing the timer expiration nil value
-		// response.
-		select {
-		case <-timer.C:
-		default:
-			return ctx.Err()
-		}
-	case <-timer.C:
-	}
-
-	return nil
-}
diff --git a/exporters/otlp/internal/retry/retry_test.go b/exporters/otlp/internal/retry/retry_test.go
deleted file mode 100644
index de574a73579..00000000000
--- a/exporters/otlp/internal/retry/retry_test.go
+++ /dev/null
@@ -1,258 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package retry
-
-import (
-	"context"
-	"errors"
-	"math"
-	"sync"
-	"testing"
-	"time"
-
-	"github.com/cenkalti/backoff/v4"
-	"github.com/stretchr/testify/assert"
-)
-
-func TestWait(t *testing.T) {
-	tests := []struct {
-		ctx      context.Context
-		delay    time.Duration
-		expected error
-	}{
-		{
-			ctx:   context.Background(),
-			delay: time.Duration(0),
-		},
-		{
-			ctx:   context.Background(),
-			delay: time.Duration(1),
-		},
-		{
-			ctx:   context.Background(),
-			delay: time.Duration(-1),
-		},
-		{
-			ctx: func() context.Context {
-				ctx, cancel := context.WithCancel(context.Background())
-				cancel()
-				return ctx
-			}(),
-			// Ensure the timer and context do not end simultaneously.
-			delay:    1 * time.Hour,
-			expected: context.Canceled,
-		},
-	}
-
-	for _, test := range tests {
-		err := wait(test.ctx, test.delay)
-		if test.expected == nil {
-			assert.NoError(t, err)
-		} else {
-			assert.ErrorIs(t, err, test.expected)
-		}
-	}
-}
-
-func TestNonRetryableError(t *testing.T) {
-	ev := func(error) (bool, time.Duration) { return false, 0 }
-
-	reqFunc := Config{
-		Enabled:         true,
-		InitialInterval: 1 * time.Nanosecond,
-		MaxInterval:     1 * time.Nanosecond,
-		// Never stop retrying.
-		MaxElapsedTime: 0,
-	}.RequestFunc(ev)
-	ctx := context.Background()
-	assert.NoError(t, reqFunc(ctx, func(context.Context) error {
-		return nil
-	}))
-	assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error {
-		return assert.AnError
-	}), assert.AnError)
-}
-
-func TestThrottledRetry(t *testing.T) {
-	// Ensure the throttle delay is used by making longer than backoff delay.
-	throttleDelay, backoffDelay := time.Second, time.Nanosecond
-
-	ev := func(error) (bool, time.Duration) {
-		// Retry everything with a throttle delay.
-		return true, throttleDelay
-	}
-
-	reqFunc := Config{
-		Enabled:         true,
-		InitialInterval: backoffDelay,
-		MaxInterval:     backoffDelay,
-		// Never stop retrying.
-		MaxElapsedTime: 0,
-	}.RequestFunc(ev)
-
-	origWait := waitFunc
-	var done bool
-	waitFunc = func(_ context.Context, delay time.Duration) error {
-		assert.Equal(t, throttleDelay, delay, "retry not throttled")
-		// Try twice to ensure call is attempted again after delay.
-		if done {
-			return assert.AnError
-		}
-		done = true
-		return nil
-	}
-	defer func() { waitFunc = origWait }()
-
-	ctx := context.Background()
-	assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error {
-		return errors.New("not this error")
-	}), assert.AnError)
-}
-
-func TestBackoffRetry(t *testing.T) {
-	ev := func(error) (bool, time.Duration) { return true, 0 }
-
-	delay := time.Nanosecond
-	reqFunc := Config{
-		Enabled:         true,
-		InitialInterval: delay,
-		MaxInterval:     delay,
-		// Never stop retrying.
-		MaxElapsedTime: 0,
-	}.RequestFunc(ev)
-
-	origWait := waitFunc
-	var done bool
-	waitFunc = func(_ context.Context, d time.Duration) error {
-		delta := math.Ceil(float64(delay) * backoff.DefaultRandomizationFactor)
-		assert.InDelta(t, delay, d, delta, "retry not backoffed")
-		// Try twice to ensure call is attempted again after delay.
-		if done {
-			return assert.AnError
-		}
-		done = true
-		return nil
-	}
-	t.Cleanup(func() { waitFunc = origWait })
-
-	ctx := context.Background()
-	assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error {
-		return errors.New("not this error")
-	}), assert.AnError)
-}
-
-func TestBackoffRetryCanceledContext(t *testing.T) {
-	ev := func(error) (bool, time.Duration) { return true, 0 }
-
-	delay := time.Millisecond
-	reqFunc := Config{
-		Enabled:         true,
-		InitialInterval: delay,
-		MaxInterval:     delay,
-		// Never stop retrying.
-		MaxElapsedTime: 10 * time.Millisecond,
-	}.RequestFunc(ev)
-
-	ctx, cancel := context.WithCancel(context.Background())
-	count := 0
-	cancel()
-	err := reqFunc(ctx, func(context.Context) error {
-		count++
-		return assert.AnError
-	})
-
-	assert.ErrorIs(t, err, context.Canceled)
-	assert.Contains(t, err.Error(), assert.AnError.Error())
-	assert.Equal(t, 1, count)
-}
-
-func TestThrottledRetryGreaterThanMaxElapsedTime(t *testing.T) {
-	// Ensure the throttle delay is used by making longer than backoff delay.
-	tDelay, bDelay := time.Hour, time.Nanosecond
-	ev := func(error) (bool, time.Duration) { return true, tDelay }
-	reqFunc := Config{
-		Enabled:         true,
-		InitialInterval: bDelay,
-		MaxInterval:     bDelay,
-		MaxElapsedTime:  tDelay - (time.Nanosecond),
-	}.RequestFunc(ev)
-
-	ctx := context.Background()
-	assert.Contains(t, reqFunc(ctx, func(context.Context) error {
-		return assert.AnError
-	}).Error(), "max retry time would elapse: ")
-}
-
-func TestMaxElapsedTime(t *testing.T) {
-	ev := func(error) (bool, time.Duration) { return true, 0 }
-	delay := time.Nanosecond
-	reqFunc := Config{
-		Enabled: true,
-		// InitialInterval > MaxElapsedTime means immediate return.
-		InitialInterval: 2 * delay,
-		MaxElapsedTime:  delay,
-	}.RequestFunc(ev)
-
-	ctx := context.Background()
-	assert.Contains(t, reqFunc(ctx, func(context.Context) error {
-		return assert.AnError
-	}).Error(), "max retry time elapsed: ")
-}
-
-func TestRetryNotEnabled(t *testing.T) {
-	ev := func(error) (bool, time.Duration) {
-		t.Error("evaluated retry when not enabled")
-		return false, 0
-	}
-
-	reqFunc := Config{}.RequestFunc(ev)
-	ctx := context.Background()
-	assert.NoError(t, reqFunc(ctx, func(context.Context) error {
-		return nil
-	}))
-	assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error {
-		return assert.AnError
-	}), assert.AnError)
-}
-
-func TestRetryConcurrentSafe(t *testing.T) {
-	ev := func(error) (bool, time.Duration) { return true, 0 }
-	reqFunc := Config{
-		Enabled: true,
-	}.RequestFunc(ev)
-
-	var wg sync.WaitGroup
-	ctx := context.Background()
-
-	for i := 1; i < 5; i++ {
-		wg.Add(1)
-
-		go func() {
-			defer wg.Done()
-
-			var done bool
-			assert.NoError(t, reqFunc(ctx, func(context.Context) error {
-				if !done {
-					done = true
-					return assert.AnError
-				}
-
-				return nil
-			}))
-		}()
-	}
-
-	wg.Wait()
-}
diff --git a/exporters/otlp/internal/wrappederror.go b/exporters/otlp/internal/wrappederror.go
deleted file mode 100644
index 217751da552..00000000000
--- a/exporters/otlp/internal/wrappederror.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
-
-// ErrorKind is used to identify the kind of export error
-// being wrapped.
-type ErrorKind int
-
-const (
-	// TracesExport indicates the error comes from the OTLP trace exporter.
-	TracesExport ErrorKind = iota
-)
-
-// prefix returns a prefix for the Error() string.
-func (k ErrorKind) prefix() string {
-	switch k {
-	case TracesExport:
-		return "traces export: "
-	default:
-		return "unknown: "
-	}
-}
-
-// wrappedExportError wraps an OTLP exporter error with the kind of
-// signal that produced it.
-type wrappedExportError struct {
-	wrap error
-	kind ErrorKind
-}
-
-// WrapTracesError wraps an error from the OTLP exporter for traces.
-func WrapTracesError(err error) error {
-	return wrappedExportError{
-		wrap: err,
-		kind: TracesExport,
-	}
-}
-
-var _ error = wrappedExportError{}
-
-// Error attaches a prefix corresponding to the kind of exporter.
-func (t wrappedExportError) Error() string {
-	return t.kind.prefix() + t.wrap.Error()
-}
-
-// Unwrap returns the wrapped error.
-func (t wrappedExportError) Unwrap() error {
-	return t.wrap
-}
diff --git a/exporters/otlp/otlpmetric/doc.go b/exporters/otlp/otlpmetric/doc.go
deleted file mode 100644
index 31831c415fe..00000000000
--- a/exporters/otlp/otlpmetric/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package otlpmetric provides an OpenTelemetry metric Exporter that can be
-// used with PeriodicReader. It transforms metricdata into OTLP and transmits
-// the transformed data to OTLP receivers. The Exporter is configurable to use
-// different Clients, each using a distinct transport protocol to communicate
-// to an OTLP receiving endpoint.
-package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
diff --git a/exporters/otlp/otlpmetric/go.mod b/exporters/otlp/otlpmetric/go.mod
deleted file mode 100644
index 0f36b495e9e..00000000000
--- a/exporters/otlp/otlpmetric/go.mod
+++ /dev/null
@@ -1,47 +0,0 @@
-module go.opentelemetry.io/otel/exporters/otlp/otlpmetric
-
-go 1.19
-
-require (
-	github.com/google/go-cmp v0.5.9
-	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/sdk/metric v0.39.0
-	go.opentelemetry.io/proto/otlp v1.0.0
-	google.golang.org/grpc v1.57.0
-	google.golang.org/protobuf v1.31.0
-)
-
-require (
-	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
-	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
-	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
-	github.com/kr/text v0.2.0 // indirect
-	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/rogpeppe/go-internal v1.10.0 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	go.opentelemetry.io/otel/trace v1.16.0 // indirect
-	golang.org/x/net v0.10.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
-	golang.org/x/text v0.9.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
-)
-
-replace go.opentelemetry.io/otel/metric => ../../../metric
-
-replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric
-
-replace go.opentelemetry.io/otel => ../../..
-
-replace go.opentelemetry.io/otel/sdk => ../../../sdk
-
-replace go.opentelemetry.io/otel/exporters/otlp/internal/retry => ../internal/retry
-
-replace go.opentelemetry.io/otel/trace => ../../../trace
diff --git a/exporters/otlp/otlpmetric/go.sum b/exporters/otlp/otlpmetric/go.sum
deleted file mode 100644
index f6d29fdda91..00000000000
--- a/exporters/otlp/otlpmetric/go.sum
+++ /dev/null
@@ -1,52 +0,0 @@
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
-github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
-github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
-github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
-go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
-golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e h1:Ao9GzfUMPH3zjVfzXG5rlWlk+Q8MXWKwWpwVQE1MXfw=
-google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM=
-google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
-google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
-google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
-google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
-gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/exporters/otlp/otlpmetric/internal/client.go b/exporters/otlp/otlpmetric/internal/client.go
deleted file mode 100644
index 6c6bf67c1c7..00000000000
--- a/exporters/otlp/otlpmetric/internal/client.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal"
-
-import (
-	"context"
-
-	"go.opentelemetry.io/otel/sdk/metric"
-	"go.opentelemetry.io/otel/sdk/metric/metricdata"
-	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
-)
-
-// Client handles the transmission of OTLP data to an OTLP receiving endpoint.
-type Client interface {
-	// Temporality returns the Temporality to use for an instrument kind.
-	Temporality(metric.InstrumentKind) metricdata.Temporality
-
-	// Aggregation returns the Aggregation to use for an instrument kind.
-	Aggregation(metric.InstrumentKind) metric.Aggregation
-
-	// UploadMetrics transmits metric data to an OTLP receiver.
-	//
-	// All retry logic must be handled by UploadMetrics alone, the Exporter
-	// does not implement any retry logic. All returned errors are considered
-	// unrecoverable.
-	UploadMetrics(context.Context, *mpb.ResourceMetrics) error
-
-	// ForceFlush flushes any metric data held by an Client.
-	//
-	// The deadline or cancellation of the passed context must be honored. An
-	// appropriate error should be returned in these situations.
-	ForceFlush(context.Context) error
-
-	// Shutdown flushes all metric data held by a Client and closes any
-	// connections it holds open.
-	//
-	// The deadline or cancellation of the passed context must be honored. An
-	// appropriate error should be returned in these situations.
-	//
-	// Shutdown will only be called once by the Exporter. Once a return value
-	// is received by the Exporter from Shutdown the Client will not be used
-	// anymore. Therefore all computational resources need to be released
-	// after this is called so the Client can be garbage collected.
-	Shutdown(context.Context) error
-}
diff --git a/exporters/otlp/otlpmetric/internal/exporter.go b/exporters/otlp/otlpmetric/internal/exporter.go
deleted file mode 100644
index 508fecd6bb1..00000000000
--- a/exporters/otlp/otlpmetric/internal/exporter.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package internal provides common utilities for all otlpmetric exporters.
-//
-// Deprecated: package internal exists for historical compatibility, it should
-// not be used.
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal"
-
-import (
-	"context"
-	"fmt"
-	"sync"
-
-	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform" // nolint: staticcheck  // Atomic deprecation.
-	"go.opentelemetry.io/otel/sdk/metric"
-	"go.opentelemetry.io/otel/sdk/metric/metricdata"
-	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
-)
-
-// Exporter exports metrics data as OTLP.
-type Exporter struct {
-	// Ensure synchronous access to the client across all functionality.
-	clientMu sync.Mutex
-	client   Client
-
-	shutdownOnce sync.Once
-}
-
-// Temporality returns the Temporality to use for an instrument kind.
-func (e *Exporter) Temporality(k metric.InstrumentKind) metricdata.Temporality {
-	e.clientMu.Lock()
-	defer e.clientMu.Unlock()
-	return e.client.Temporality(k)
-}
-
-// Aggregation returns the Aggregation to use for an instrument kind.
-func (e *Exporter) Aggregation(k metric.InstrumentKind) metric.Aggregation {
-	e.clientMu.Lock()
-	defer e.clientMu.Unlock()
-	return e.client.Aggregation(k)
-}
-
-// Export transforms and transmits metric data to an OTLP receiver.
-func (e *Exporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
-	otlpRm, err := transform.ResourceMetrics(rm)
-	// Best effort upload of transformable metrics.
-	e.clientMu.Lock()
-	upErr := e.client.UploadMetrics(ctx, otlpRm)
-	e.clientMu.Unlock()
-	if upErr != nil {
-		if err == nil {
-			return fmt.Errorf("failed to upload metrics: %w", upErr)
-		}
-		// Merge the two errors.
-		return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr)
-	}
-	return err
-}
-
-// ForceFlush flushes any metric data held by an exporter.
-func (e *Exporter) ForceFlush(ctx context.Context) error {
-	// The Exporter does not hold data, forward the command to the client.
-	e.clientMu.Lock()
-	defer e.clientMu.Unlock()
-	return e.client.ForceFlush(ctx)
-}
-
-var errShutdown = fmt.Errorf("exporter is shutdown")
-
-// Shutdown flushes all metric data held by an exporter and releases any held
-// computational resources.
-func (e *Exporter) Shutdown(ctx context.Context) error {
-	err := errShutdown
-	e.shutdownOnce.Do(func() {
-		e.clientMu.Lock()
-		client := e.client
-		e.client = shutdownClient{
-			temporalitySelector: client.Temporality,
-			aggregationSelector: client.Aggregation,
-		}
-		e.clientMu.Unlock()
-		err = client.Shutdown(ctx)
-	})
-	return err
-}
-
-// New return an Exporter that uses client to transmits the OTLP data it
-// produces. The client is assumed to be fully started and able to communicate
-// with its OTLP receiving endpoint.
-func New(client Client) *Exporter {
-	return &Exporter{client: client}
-}
-
-type shutdownClient struct {
-	temporalitySelector metric.TemporalitySelector
-	aggregationSelector metric.AggregationSelector
-}
-
-func (c shutdownClient) err(ctx context.Context) error {
-	if err := ctx.Err(); err != nil {
-		return err
-	}
-	return errShutdown
-}
-
-func (c shutdownClient) Temporality(k metric.InstrumentKind) metricdata.Temporality {
-	return c.temporalitySelector(k)
-}
-
-func (c shutdownClient) Aggregation(k metric.InstrumentKind) metric.Aggregation {
-	return c.aggregationSelector(k)
-}
-
-func (c shutdownClient) UploadMetrics(ctx context.Context, _ *mpb.ResourceMetrics) error {
-	return c.err(ctx)
-}
-
-func (c shutdownClient) ForceFlush(ctx context.Context) error {
-	return c.err(ctx)
-}
-
-func (c shutdownClient) Shutdown(ctx context.Context) error {
-	return c.err(ctx)
-}
diff --git a/exporters/otlp/otlpmetric/internal/exporter_test.go b/exporters/otlp/otlpmetric/internal/exporter_test.go
deleted file mode 100644
index 6814c9dce3d..00000000000
--- a/exporters/otlp/otlpmetric/internal/exporter_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal"
-
-import (
-	"context"
-	"sync"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-
-	"go.opentelemetry.io/otel/sdk/metric"
-	"go.opentelemetry.io/otel/sdk/metric/metricdata"
-	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
-)
-
-type client struct {
-	// n is incremented by all Client methods. If these methods are called
-	// concurrently this should fail tests run with the race detector.
-	n int
-}
-
-func (c *client) Temporality(k metric.InstrumentKind) metricdata.Temporality {
-	return metric.DefaultTemporalitySelector(k)
-}
-
-func (c *client) Aggregation(k metric.InstrumentKind) metric.Aggregation {
-	return metric.DefaultAggregationSelector(k)
-}
-
-func (c *client) UploadMetrics(context.Context, *mpb.ResourceMetrics) error {
-	c.n++
-	return nil
-}
-
-func (c *client) ForceFlush(context.Context) error {
-	c.n++
-	return nil
-}
-
-func (c *client) Shutdown(context.Context) error {
-	c.n++
-	return nil
-}
-
-func TestExporterClientConcurrentSafe(t *testing.T) {
-	const goroutines = 5
-
-	exp := New(&client{})
-	rm := new(metricdata.ResourceMetrics)
-	ctx := context.Background()
-
-	done := make(chan struct{})
-	var wg, someWork sync.WaitGroup
-	for i := 0; i < goroutines; i++ {
-		wg.Add(1)
-		someWork.Add(1)
-		go func() {
-			defer wg.Done()
-			assert.NoError(t, exp.Export(ctx, rm))
-			assert.NoError(t, exp.ForceFlush(ctx))
-
-			// Ensure some work is done before shutting down.
-			someWork.Done()
-
-			for {
-				_ = exp.Export(ctx, rm)
-				_ = exp.ForceFlush(ctx)
-
-				select {
-				case <-done:
-					return
-				default:
-				}
-			}
-		}()
-	}
-
-	someWork.Wait()
-	assert.NoError(t, exp.Shutdown(ctx))
-	assert.ErrorIs(t, exp.Shutdown(ctx), errShutdown)
-
-	close(done)
-	wg.Wait()
-}
diff --git a/exporters/otlp/otlpmetric/internal/header.go b/exporters/otlp/otlpmetric/internal/header.go
deleted file mode 100644
index f07837ea7b6..00000000000
--- a/exporters/otlp/otlpmetric/internal/header.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal"
-
-import (
-	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
-)
-
-// GetUserAgentHeader returns an OTLP header value form "OTel OTLP Exporter Go/{{ .Version }}"
-// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/protocol/exporter.md#user-agent
-func GetUserAgentHeader() string {
-	return "OTel OTLP Exporter Go/" + otlpmetric.Version()
-}
diff --git a/exporters/otlp/otlpmetric/internal/oconf/envconfig.go b/exporters/otlp/otlpmetric/internal/oconf/envconfig.go
deleted file mode 100644
index 540165de933..00000000000
--- a/exporters/otlp/otlpmetric/internal/oconf/envconfig.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"net/url"
-	"os"
-	"path"
-	"strings"
-	"time"
-
-	"go.opentelemetry.io/otel/exporters/otlp/internal/envconfig" // nolint: staticcheck  // Synchronous deprecation.
-	"go.opentelemetry.io/otel/internal/global"
-	"go.opentelemetry.io/otel/sdk/metric"
-	"go.opentelemetry.io/otel/sdk/metric/metricdata"
-)
-
-// DefaultEnvOptionsReader is the default environments reader.
-var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
-	GetEnv:    os.Getenv,
-	ReadFile:  os.ReadFile,
-	Namespace: "OTEL_EXPORTER_OTLP",
-}
-
-// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
-func ApplyGRPCEnvConfigs(cfg Config) Config {
-	opts := getOptionsFromEnv()
-	for _, opt := range opts {
-		cfg = opt.ApplyGRPCOption(cfg)
-	}
-	return cfg
-}
-
-// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
-func ApplyHTTPEnvConfigs(cfg Config) Config {
-	opts := getOptionsFromEnv()
-	for _, opt := range opts {
-		cfg = opt.ApplyHTTPOption(cfg)
-	}
-	return cfg
-}
-
-func getOptionsFromEnv() []GenericOption {
-	opts := []GenericOption{}
-
-	tlsConf := &tls.Config{}
-	DefaultEnvOptionsReader.Apply(
-		envconfig.WithURL("ENDPOINT", func(u *url.URL) {
-			opts = append(opts, withEndpointScheme(u))
-			opts = append(opts, newSplitOption(func(cfg Config) Config {
-				cfg.Metrics.Endpoint = u.Host
-				// For OTLP/HTTP endpoint URLs without a per-signal
-				// configuration, the passed endpoint is used as a base URL
-				// and the signals are sent to these paths relative to that.
-				cfg.Metrics.URLPath = path.Join(u.Path, DefaultMetricsPath)
-				return cfg
-			}, withEndpointForGRPC(u)))
-		}),
-		envconfig.WithURL("METRICS_ENDPOINT", func(u *url.URL) {
-			opts = append(opts, withEndpointScheme(u))
-			opts = append(opts, newSplitOption(func(cfg Config) Config {
-				cfg.Metrics.Endpoint = u.Host
-				// For endpoint URLs for OTLP/HTTP per-signal variables, the
-				// URL MUST be used as-is without any modification. The only
-				// exception is that if an URL contains no path part, the root
-				// path / MUST be used.
-				path := u.Path
-				if path == "" {
-					path = "/"
-				}
-				cfg.Metrics.URLPath = path
-				return cfg
-			}, withEndpointForGRPC(u)))
-		}),
-		envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
-		envconfig.WithCertPool("METRICS_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
-		envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
-		envconfig.WithClientCert("METRICS_CLIENT_CERTIFICATE", "METRICS_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
-		envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
-		envconfig.WithBool("METRICS_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
-		withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
-		envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
-		envconfig.WithHeaders("METRICS_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
-		WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
-		WithEnvCompression("METRICS_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
-		envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
-		envconfig.WithDuration("METRICS_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
-		withEnvTemporalityPreference("METRICS_TEMPORALITY_PREFERENCE", func(t metric.TemporalitySelector) { opts = append(opts, WithTemporalitySelector(t)) }),
-	)
-
-	return opts
-}
-
-func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
-	return func(cfg Config) Config {
-		// For OTLP/gRPC endpoints, this is the target to which the
-		// exporter is going to send telemetry.
-		cfg.Metrics.Endpoint = path.Join(u.Host, u.Path)
-		return cfg
-	}
-}
-
-// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
-func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
-	return func(e *envconfig.EnvOptionsReader) {
-		if v, ok := e.GetEnvValue(n); ok {
-			cp := NoCompression
-			if v == "gzip" {
-				cp = GzipCompression
-			}
-
-			fn(cp)
-		}
-	}
-}
-
-func withEndpointScheme(u *url.URL) GenericOption {
-	switch strings.ToLower(u.Scheme) {
-	case "http", "unix":
-		return WithInsecure()
-	default:
-		return WithSecure()
-	}
-}
-
-// revive:disable-next-line:flag-parameter
-func withInsecure(b bool) GenericOption {
-	if b {
-		return WithInsecure()
-	}
-	return WithSecure()
-}
-
-func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
-	return func(e *envconfig.EnvOptionsReader) {
-		if c.RootCAs != nil || len(c.Certificates) > 0 {
-			fn(c)
-		}
-	}
-}
-
-func withEnvTemporalityPreference(n string, fn func(metric.TemporalitySelector)) func(e *envconfig.EnvOptionsReader) {
-	return func(e *envconfig.EnvOptionsReader) {
-		if s, ok := e.GetEnvValue(n); ok {
-			switch strings.ToLower(s) {
-			case "cumulative":
-				fn(cumulativeTemporality)
-			case "delta":
-				fn(deltaTemporality)
-			case "lowmemory":
-				fn(lowMemory)
-			default:
-				global.Warn("OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE is set to an invalid value, ignoring.", "value", s)
-			}
-		}
-	}
-}
-
-func cumulativeTemporality(metric.InstrumentKind) metricdata.Temporality {
-	return metricdata.CumulativeTemporality
-}
-
-func deltaTemporality(ik metric.InstrumentKind) metricdata.Temporality {
-	switch ik {
-	case metric.InstrumentKindCounter, metric.InstrumentKindHistogram, metric.InstrumentKindObservableCounter:
-		return metricdata.DeltaTemporality
-	default:
-		return metricdata.CumulativeTemporality
-	}
-}
-
-func lowMemory(ik metric.InstrumentKind) metricdata.Temporality {
-	switch ik {
-	case metric.InstrumentKindCounter, metric.InstrumentKindHistogram:
-		return metricdata.DeltaTemporality
-	default:
-		return metricdata.CumulativeTemporality
-	}
-}
diff --git a/exporters/otlp/otlpmetric/internal/oconf/envconfig_test.go b/exporters/otlp/otlpmetric/internal/oconf/envconfig_test.go
deleted file mode 100644
index 0c54c78e29a..00000000000
--- a/exporters/otlp/otlpmetric/internal/oconf/envconfig_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package oconf
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	"go.opentelemetry.io/otel/sdk/metric"
-	"go.opentelemetry.io/otel/sdk/metric/metricdata"
-)
-
-func TestWithEnvTemporalityPreference(t *testing.T) {
-	origReader := DefaultEnvOptionsReader.GetEnv
-	tests := []struct {
-		name     string
-		envValue string
-		want     map[metric.InstrumentKind]metricdata.Temporality
-	}{
-		{
-			name:     "default do not set the selector",
-			envValue: "",
-		},
-		{
-			name:     "non-normative do not set the selector",
-			envValue: "non-normative",
-		},
-		{
-			name:     "cumulative",
-			envValue: "cumulative",
-			want: map[metric.InstrumentKind]metricdata.Temporality{
-				metric.InstrumentKindCounter:                 metricdata.CumulativeTemporality,
-				metric.InstrumentKindHistogram:               metricdata.CumulativeTemporality,
-				metric.InstrumentKindUpDownCounter:           metricdata.CumulativeTemporality,
-				metric.InstrumentKindObservableCounter:       metricdata.CumulativeTemporality,
-				metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality,
-				metric.InstrumentKindObservableGauge:         metricdata.CumulativeTemporality,
-			},
-		},
-		{
-			name:     "delta",
-			envValue: "delta",
-			want: map[metric.InstrumentKind]metricdata.Temporality{
-				metric.InstrumentKindCounter:                 metricdata.DeltaTemporality,
-				metric.InstrumentKindHistogram:               metricdata.DeltaTemporality,
-				metric.InstrumentKindUpDownCounter:           metricdata.CumulativeTemporality,
-				metric.InstrumentKindObservableCounter:       metricdata.DeltaTemporality,
-				metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality,
-				metric.InstrumentKindObservableGauge:         metricdata.CumulativeTemporality,
-			},
-		},
-		{
-			name:     "lowmemory",
-			envValue: "lowmemory",
-			want: map[metric.InstrumentKind]metricdata.Temporality{
-				metric.InstrumentKindCounter:                 metricdata.DeltaTemporality,
-				metric.InstrumentKindHistogram:               metricdata.DeltaTemporality,
-				metric.InstrumentKindUpDownCounter:           metricdata.CumulativeTemporality,
-				metric.InstrumentKindObservableCounter:       metricdata.CumulativeTemporality,
-				metric.InstrumentKindObservableUpDownCounter: metricdata.CumulativeTemporality,
-				metric.InstrumentKindObservableGauge:         metricdata.CumulativeTemporality,
-			},
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			DefaultEnvOptionsReader.GetEnv = func(key string) string {
-				if key == "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE" {
-					return tt.envValue
-				}
-				return origReader(key)
-			}
-			cfg := Config{}
-			cfg = ApplyGRPCEnvConfigs(cfg)
-
-			if tt.want == nil {
-				// There is no function set, the SDK's default is used.
-				assert.Nil(t, cfg.Metrics.TemporalitySelector)
-				return
-			}
-
-			require.NotNil(t, cfg.Metrics.TemporalitySelector)
-			for ik, want := range tt.want {
-				assert.Equal(t, want, cfg.Metrics.TemporalitySelector(ik))
-			}
-		})
-	}
-	DefaultEnvOptionsReader.GetEnv = origReader
-}
diff --git a/exporters/otlp/otlpmetric/internal/oconf/options.go b/exporters/otlp/otlpmetric/internal/oconf/options.go
deleted file mode 100644
index 5d8f1d25b77..00000000000
--- a/exporters/otlp/otlpmetric/internal/oconf/options.go
+++ /dev/null
@@ -1,345 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package oconf provides common metric configuration types and functionality
-// for all otlpmetric exporters.
-//
-// Deprecated: package oconf exists for historical compatibility, it should not
-// be used.
-package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
-
-import (
-	"crypto/tls"
-	"fmt"
-	"time"
-
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/backoff"
-	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/credentials/insecure"
-	"google.golang.org/grpc/encoding/gzip"
-
-	"go.opentelemetry.io/otel/exporters/otlp/internal"                       // nolint: staticcheck  // Synchronous deprecation.
-	"go.opentelemetry.io/otel/exporters/otlp/internal/retry"                 // nolint: staticcheck  // Synchronous deprecation.
-	ominternal "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" // nolint: staticcheck  // Atomic deprecation.
-	"go.opentelemetry.io/otel/sdk/metric"
-)
-
-const (
-	// DefaultMaxAttempts describes how many times the driver
-	// should retry the sending of the payload in case of a
-	// retryable error.
-	DefaultMaxAttempts int = 5
-	// DefaultMetricsPath is a default URL path for endpoint that
-	// receives metrics.
-	DefaultMetricsPath string = "/v1/metrics"
-	// DefaultBackoff is a default base backoff time used in the
-	// exponential backoff strategy.
-	DefaultBackoff time.Duration = 300 * time.Millisecond
-	// DefaultTimeout is a default max waiting time for the backend to process
-	// each span or metrics batch.
-	DefaultTimeout time.Duration = 10 * time.Second
-)
-
-type (
-	SignalConfig struct {
-		Endpoint    string
-		Insecure    bool
-		TLSCfg      *tls.Config
-		Headers     map[string]string
-		Compression Compression
-		Timeout     time.Duration
-		URLPath     string
-
-		// gRPC configurations
-		GRPCCredentials credentials.TransportCredentials
-
-		TemporalitySelector metric.TemporalitySelector
-		AggregationSelector metric.AggregationSelector
-	}
-
-	Config struct {
-		// Signal specific configurations
-		Metrics SignalConfig
-
-		RetryConfig retry.Config
-
-		// gRPC configurations
-		ReconnectionPeriod time.Duration
-		ServiceConfig      string
-		DialOptions        []grpc.DialOption
-		GRPCConn           *grpc.ClientConn
-	}
-)
-
-// NewHTTPConfig returns a new Config with all settings applied from opts and
-// any unset setting using the default HTTP config values.
-func NewHTTPConfig(opts ...HTTPOption) Config {
-	cfg := Config{
-		Metrics: SignalConfig{
-			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
-			URLPath:     DefaultMetricsPath,
-			Compression: NoCompression,
-			Timeout:     DefaultTimeout,
-
-			TemporalitySelector: metric.DefaultTemporalitySelector,
-			AggregationSelector: metric.DefaultAggregationSelector,
-		},
-		RetryConfig: retry.DefaultConfig,
-	}
-	cfg = ApplyHTTPEnvConfigs(cfg)
-	for _, opt := range opts {
-		cfg = opt.ApplyHTTPOption(cfg)
-	}
-	cfg.Metrics.URLPath = internal.CleanPath(cfg.Metrics.URLPath, DefaultMetricsPath)
-	return cfg
-}
-
-// NewGRPCConfig returns a new Config with all settings applied from opts and
-// any unset setting using the default gRPC config values.
-func NewGRPCConfig(opts ...GRPCOption) Config {
-	cfg := Config{
-		Metrics: SignalConfig{
-			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
-			URLPath:     DefaultMetricsPath,
-			Compression: NoCompression,
-			Timeout:     DefaultTimeout,
-
-			TemporalitySelector: metric.DefaultTemporalitySelector,
-			AggregationSelector: metric.DefaultAggregationSelector,
-		},
-		RetryConfig: retry.DefaultConfig,
-		DialOptions: []grpc.DialOption{grpc.WithUserAgent(ominternal.GetUserAgentHeader())},
-	}
-	cfg = ApplyGRPCEnvConfigs(cfg)
-	for _, opt := range opts {
-		cfg = opt.ApplyGRPCOption(cfg)
-	}
-
-	if cfg.ServiceConfig != "" {
-		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
-	}
-	// Priroritize GRPCCredentials over Insecure (passing both is an error).
-	if cfg.Metrics.GRPCCredentials != nil {
-		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Metrics.GRPCCredentials))
-	} else if cfg.Metrics.Insecure {
-		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
-	} else {
-		// Default to using the host's root CA.
-		creds := credentials.NewTLS(nil)
-		cfg.Metrics.GRPCCredentials = creds
-		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
-	}
-	if cfg.Metrics.Compression == GzipCompression {
-		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
-	}
-	if len(cfg.DialOptions) != 0 {
-		cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
-	}
-	if cfg.ReconnectionPeriod != 0 {
-		p := grpc.ConnectParams{
-			Backoff:           backoff.DefaultConfig,
-			MinConnectTimeout: cfg.ReconnectionPeriod,
-		}
-		cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
-	}
-
-	return cfg
-}
-
-type (
-	// GenericOption applies an option to the HTTP or gRPC driver.
-	GenericOption interface {
-		ApplyHTTPOption(Config) Config
-		ApplyGRPCOption(Config) Config
-
-		// A private method to prevent users implementing the
-		// interface and so future additions to it will not
-		// violate compatibility.
-		private()
-	}
-
-	// HTTPOption applies an option to the HTTP driver.
-	HTTPOption interface {
-		ApplyHTTPOption(Config) Config
-
-		// A private method to prevent users implementing the
-		// interface and so future additions to it will not
-		// violate compatibility.
-		private()
-	}
-
-	// GRPCOption applies an option to the gRPC driver.
-	GRPCOption interface {
-		ApplyGRPCOption(Config) Config
-
-		// A private method to prevent users implementing the
-		// interface and so future additions to it will not
-		// violate compatibility.
-		private()
-	}
-)
-
-// genericOption is an option that applies the same logic
-// for both gRPC and HTTP.
-type genericOption struct {
-	fn func(Config) Config
-}
-
-func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
-	return g.fn(cfg)
-}
-
-func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
-	return g.fn(cfg)
-}
-
-func (genericOption) private() {}
-
-func newGenericOption(fn func(cfg Config) Config) GenericOption {
-	return &genericOption{fn: fn}
-}
-
-// splitOption is an option that applies different logics
-// for gRPC and HTTP.
-type splitOption struct {
-	httpFn func(Config) Config
-	grpcFn func(Config) Config
-}
-
-func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
-	return g.grpcFn(cfg)
-}
-
-func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
-	return g.httpFn(cfg)
-}
-
-func (splitOption) private() {}
-
-func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
-	return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
-}
-
-// httpOption is an option that is only applied to the HTTP driver.
-type httpOption struct {
-	fn func(Config) Config
-}
-
-func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
-	return h.fn(cfg)
-}
-
-func (httpOption) private() {}
-
-func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
-	return &httpOption{fn: fn}
-}
-
-// grpcOption is an option that is only applied to the gRPC driver.
-type grpcOption struct {
-	fn func(Config) Config
-}
-
-func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
-	return h.fn(cfg)
-}
-
-func (grpcOption) private() {}
-
-func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
-	return &grpcOption{fn: fn}
-}
-
-// Generic Options
-
-func WithEndpoint(endpoint string) GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.Metrics.Endpoint = endpoint
-		return cfg
-	})
-}
-
-func WithCompression(compression Compression) GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.Metrics.Compression = compression
-		return cfg
-	})
-}
-
-func WithURLPath(urlPath string) GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.Metrics.URLPath = urlPath
-		return cfg
-	})
-}
-
-func WithRetry(rc retry.Config) GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.RetryConfig = rc
-		return cfg
-	})
-}
-
-func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
-	return newSplitOption(func(cfg Config) Config {
-		cfg.Metrics.TLSCfg = tlsCfg.Clone()
-		return cfg
-	}, func(cfg Config) Config {
-		cfg.Metrics.GRPCCredentials = credentials.NewTLS(tlsCfg)
-		return cfg
-	})
-}
-
-func WithInsecure() GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.Metrics.Insecure = true
-		return cfg
-	})
-}
-
-func WithSecure() GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.Metrics.Insecure = false
-		return cfg
-	})
-}
-
-func WithHeaders(headers map[string]string) GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.Metrics.Headers = headers
-		return cfg
-	})
-}
-
-func WithTimeout(duration time.Duration) GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.Metrics.Timeout = duration
-		return cfg
-	})
-}
-
-func WithTemporalitySelector(selector metric.TemporalitySelector) GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.Metrics.TemporalitySelector = selector
-		return cfg
-	})
-}
-
-func WithAggregationSelector(selector metric.AggregationSelector) GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.Metrics.AggregationSelector = selector
-		return cfg
-	})
-}
diff --git a/exporters/otlp/otlpmetric/internal/oconf/options_test.go b/exporters/otlp/otlpmetric/internal/oconf/options_test.go
deleted file mode 100644
index 80102da7d01..00000000000
--- a/exporters/otlp/otlpmetric/internal/oconf/options_test.go
+++ /dev/null
@@ -1,465 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package oconf_test
-
-import (
-	"errors"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-
-	"go.opentelemetry.io/otel/exporters/otlp/internal/envconfig" // nolint: staticcheck  // Synchronous deprecation.
-	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
-	"go.opentelemetry.io/otel/sdk/metric"
-	"go.opentelemetry.io/otel/sdk/metric/metricdata"
-)
-
-const (
-	WeakCertificate = `
------BEGIN CERTIFICATE-----
-MIIBhzCCASygAwIBAgIRANHpHgAWeTnLZpTSxCKs0ggwCgYIKoZIzj0EAwIwEjEQ
-MA4GA1UEChMHb3RlbC1nbzAeFw0yMTA0MDExMzU5MDNaFw0yMTA0MDExNDU5MDNa
-MBIxEDAOBgNVBAoTB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS9
-nWSkmPCxShxnp43F+PrOtbGV7sNfkbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0Z
-sJCLHGogQsYnWJBXUZOVo2MwYTAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYI
-KwYBBQUHAwEwDAYDVR0TAQH/BAIwADAsBgNVHREEJTAjgglsb2NhbGhvc3SHEAAA
-AAAAAAAAAAAAAAAAAAGHBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhANwZVVKvfvQ/
-1HXsTvgH+xTQswOwSSKYJ1cVHQhqK7ZbAiEAus8NxpTRnp5DiTMuyVmhVNPB+bVH
-Lhnm4N/QDk5rek0=
------END CERTIFICATE-----
-`
-	WeakPrivateKey = `
------BEGIN PRIVATE KEY-----
-MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgN8HEXiXhvByrJ1zK
-SFT6Y2l2KqDWwWzKf+t4CyWrNKehRANCAAS9nWSkmPCxShxnp43F+PrOtbGV7sNf
-kbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0ZsJCLHGogQsYnWJBXUZOV
------END PRIVATE KEY-----
-`
-)
-
-type env map[string]string
-
-func (e *env) getEnv(env string) string {
-	return (*e)[env]
-}
-
-type fileReader map[string][]byte
-
-func (f *fileReader) readFile(filename string) ([]byte, error) {
-	if b, ok := (*f)[filename]; ok {
-		return b, nil
-	}
-	return nil, errors.New("file not found")
-}
-
-func TestConfigs(t *testing.T) {
-	tlsCert, err := oconf.CreateTLSConfig([]byte(WeakCertificate))
-	assert.NoError(t, err)
-
-	tests := []struct {
-		name       string
-		opts       []oconf.GenericOption
-		env        env
-		fileReader fileReader
-		asserts    func(t *testing.T, c *oconf.Config, grpcOption bool)
-	}{
-		{
-			name: "Test default configs",
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				if grpcOption {
-					assert.Equal(t, "localhost:4317", c.Metrics.Endpoint)
-				} else {
-					assert.Equal(t, "localhost:4318", c.Metrics.Endpoint)
-				}
-				assert.Equal(t, oconf.NoCompression, c.Metrics.Compression)
-				assert.Equal(t, map[string]string(nil), c.Metrics.Headers)
-				assert.Equal(t, 10*time.Second, c.Metrics.Timeout)
-			},
-		},
-
-		// Endpoint Tests
-		{
-			name: "Test With Endpoint",
-			opts: []oconf.GenericOption{
-				oconf.WithEndpoint("someendpoint"),
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, "someendpoint", c.Metrics.Endpoint)
-			},
-		},
-		{
-			name: "Test Environment Endpoint",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_ENDPOINT": "https://env.endpoint/prefix",
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.False(t, c.Metrics.Insecure)
-				if grpcOption {
-					assert.Equal(t, "env.endpoint/prefix", c.Metrics.Endpoint)
-				} else {
-					assert.Equal(t, "env.endpoint", c.Metrics.Endpoint)
-					assert.Equal(t, "/prefix/v1/metrics", c.Metrics.URLPath)
-				}
-			},
-		},
-		{
-			name: "Test Environment Signal Specific Endpoint",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_ENDPOINT":         "https://overrode.by.signal.specific/env/var",
-				"OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "http://env.metrics.endpoint",
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.True(t, c.Metrics.Insecure)
-				assert.Equal(t, "env.metrics.endpoint", c.Metrics.Endpoint)
-				if !grpcOption {
-					assert.Equal(t, "/", c.Metrics.URLPath)
-				}
-			},
-		},
-		{
-			name: "Test Mixed Environment and With Endpoint",
-			opts: []oconf.GenericOption{
-				oconf.WithEndpoint("metrics_endpoint"),
-			},
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_ENDPOINT": "env_endpoint",
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, "metrics_endpoint", c.Metrics.Endpoint)
-			},
-		},
-		{
-			name: "Test Environment Endpoint with HTTP scheme",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_ENDPOINT": "http://env_endpoint",
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, "env_endpoint", c.Metrics.Endpoint)
-				assert.Equal(t, true, c.Metrics.Insecure)
-			},
-		},
-		{
-			name: "Test Environment Endpoint with HTTP scheme and leading & trailingspaces",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_ENDPOINT": "      http://env_endpoint    ",
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, "env_endpoint", c.Metrics.Endpoint)
-				assert.Equal(t, true, c.Metrics.Insecure)
-			},
-		},
-		{
-			name: "Test Environment Endpoint with HTTPS scheme",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_ENDPOINT": "https://env_endpoint",
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, "env_endpoint", c.Metrics.Endpoint)
-				assert.Equal(t, false, c.Metrics.Insecure)
-			},
-		},
-		{
-			name: "Test Environment Signal Specific Endpoint with uppercase scheme",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_ENDPOINT":         "HTTPS://overrode_by_signal_specific",
-				"OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "HtTp://env_metrics_endpoint",
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, "env_metrics_endpoint", c.Metrics.Endpoint)
-				assert.Equal(t, true, c.Metrics.Insecure)
-			},
-		},
-
-		// Certificate tests
-		{
-			name: "Test Default Certificate",
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				if grpcOption {
-					assert.NotNil(t, c.Metrics.GRPCCredentials)
-				} else {
-					assert.Nil(t, c.Metrics.TLSCfg)
-				}
-			},
-		},
-		{
-			name: "Test With Certificate",
-			opts: []oconf.GenericOption{
-				oconf.WithTLSClientConfig(tlsCert),
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				if grpcOption {
-					//TODO: make sure gRPC's credentials actually works
-					assert.NotNil(t, c.Metrics.GRPCCredentials)
-				} else {
-					// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
-					assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects())
-				}
-			},
-		},
-		{
-			name: "Test Environment Certificate",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path",
-			},
-			fileReader: fileReader{
-				"cert_path": []byte(WeakCertificate),
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				if grpcOption {
-					assert.NotNil(t, c.Metrics.GRPCCredentials)
-				} else {
-					// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
-					assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects())
-				}
-			},
-		},
-		{
-			name: "Test Environment Signal Specific Certificate",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_CERTIFICATE":         "overrode_by_signal_specific",
-				"OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE": "cert_path",
-			},
-			fileReader: fileReader{
-				"cert_path":    []byte(WeakCertificate),
-				"invalid_cert": []byte("invalid certificate file."),
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				if grpcOption {
-					assert.NotNil(t, c.Metrics.GRPCCredentials)
-				} else {
-					// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
-					assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Metrics.TLSCfg.RootCAs.Subjects())
-				}
-			},
-		},
-		{
-			name: "Test Mixed Environment and With Certificate",
-			opts: []oconf.GenericOption{},
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path",
-			},
-			fileReader: fileReader{
-				"cert_path": []byte(WeakCertificate),
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				if grpcOption {
-					assert.NotNil(t, c.Metrics.GRPCCredentials)
-				} else {
-					// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
-					assert.Equal(t, 1, len(c.Metrics.TLSCfg.RootCAs.Subjects()))
-				}
-			},
-		},
-
-		// Headers tests
-		{
-			name: "Test With Headers",
-			opts: []oconf.GenericOption{
-				oconf.WithHeaders(map[string]string{"h1": "v1"}),
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, map[string]string{"h1": "v1"}, c.Metrics.Headers)
-			},
-		},
-		{
-			name: "Test Environment Headers",
-			env:  map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers)
-			},
-		},
-		{
-			name: "Test Environment Signal Specific Headers",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_HEADERS":         "overrode_by_signal_specific",
-				"OTEL_EXPORTER_OTLP_METRICS_HEADERS": "h1=v1,h2=v2",
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers)
-			},
-		},
-		{
-			name: "Test Mixed Environment and With Headers",
-			env:  map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"},
-			opts: []oconf.GenericOption{
-				oconf.WithHeaders(map[string]string{"m1": "mv1"}),
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, map[string]string{"m1": "mv1"}, c.Metrics.Headers)
-			},
-		},
-
-		// Compression Tests
-		{
-			name: "Test With Compression",
-			opts: []oconf.GenericOption{
-				oconf.WithCompression(oconf.GzipCompression),
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, oconf.GzipCompression, c.Metrics.Compression)
-			},
-		},
-		{
-			name: "Test Environment Compression",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_COMPRESSION": "gzip",
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, oconf.GzipCompression, c.Metrics.Compression)
-			},
-		},
-		{
-			name: "Test Environment Signal Specific Compression",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip",
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, oconf.GzipCompression, c.Metrics.Compression)
-			},
-		},
-		{
-			name: "Test Mixed Environment and With Compression",
-			opts: []oconf.GenericOption{
-				oconf.WithCompression(oconf.NoCompression),
-			},
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip",
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, oconf.NoCompression, c.Metrics.Compression)
-			},
-		},
-
-		// Timeout Tests
-		{
-			name: "Test With Timeout",
-			opts: []oconf.GenericOption{
-				oconf.WithTimeout(time.Duration(5 * time.Second)),
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, 5*time.Second, c.Metrics.Timeout)
-			},
-		},
-		{
-			name: "Test Environment Timeout",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_TIMEOUT": "15000",
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, c.Metrics.Timeout, 15*time.Second)
-			},
-		},
-		{
-			name: "Test Environment Signal Specific Timeout",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_TIMEOUT":         "15000",
-				"OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000",
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, c.Metrics.Timeout, 28*time.Second)
-			},
-		},
-		{
-			name: "Test Mixed Environment and With Timeout",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_TIMEOUT":         "15000",
-				"OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000",
-			},
-			opts: []oconf.GenericOption{
-				oconf.WithTimeout(5 * time.Second),
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				assert.Equal(t, c.Metrics.Timeout, 5*time.Second)
-			},
-		},
-
-		// Temporality Selector Tests
-		{
-			name: "WithTemporalitySelector",
-			opts: []oconf.GenericOption{
-				oconf.WithTemporalitySelector(deltaSelector),
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				// Function value comparisons are disallowed, test non-default
-				// behavior of a TemporalitySelector here to ensure our "catch
-				// all" was set.
-				var undefinedKind metric.InstrumentKind
-				got := c.Metrics.TemporalitySelector
-				assert.Equal(t, metricdata.DeltaTemporality, got(undefinedKind))
-			},
-		},
-
-		// Aggregation Selector Tests
-		{
-			name: "WithAggregationSelector",
-			opts: []oconf.GenericOption{
-				oconf.WithAggregationSelector(dropSelector),
-			},
-			asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) {
-				// Function value comparisons are disallowed, test non-default
-				// behavior of a AggregationSelector here to ensure our "catch
-				// all" was set.
-				var undefinedKind metric.InstrumentKind
-				got := c.Metrics.AggregationSelector
-				assert.Equal(t, metric.AggregationDrop{}, got(undefinedKind))
-			},
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			origEOR := oconf.DefaultEnvOptionsReader
-			oconf.DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
-				GetEnv:    tt.env.getEnv,
-				ReadFile:  tt.fileReader.readFile,
-				Namespace: "OTEL_EXPORTER_OTLP",
-			}
-			t.Cleanup(func() { oconf.DefaultEnvOptionsReader = origEOR })
-
-			// Tests Generic options as HTTP Options
-			cfg := oconf.NewHTTPConfig(asHTTPOptions(tt.opts)...)
-			tt.asserts(t, &cfg, false)
-
-			// Tests Generic options as gRPC Options
-			cfg = oconf.NewGRPCConfig(asGRPCOptions(tt.opts)...)
-			tt.asserts(t, &cfg, true)
-		})
-	}
-}
-
-func dropSelector(metric.InstrumentKind) metric.Aggregation {
-	return metric.AggregationDrop{}
-}
-
-func deltaSelector(metric.InstrumentKind) metricdata.Temporality {
-	return metricdata.DeltaTemporality
-}
-
-func asHTTPOptions(opts []oconf.GenericOption) []oconf.HTTPOption {
-	converted := make([]oconf.HTTPOption, len(opts))
-	for i, o := range opts {
-		converted[i] = oconf.NewHTTPOption(o.ApplyHTTPOption)
-	}
-	return converted
-}
-
-func asGRPCOptions(opts []oconf.GenericOption) []oconf.GRPCOption {
-	converted := make([]oconf.GRPCOption, len(opts))
-	for i, o := range opts {
-		converted[i] = oconf.NewGRPCOption(o.ApplyGRPCOption)
-	}
-	return converted
-}
diff --git a/exporters/otlp/otlpmetric/internal/oconf/optiontypes.go b/exporters/otlp/otlpmetric/internal/oconf/optiontypes.go
deleted file mode 100644
index e878ee74104..00000000000
--- a/exporters/otlp/otlpmetric/internal/oconf/optiontypes.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
-
-import "time"
-
-const (
-	// DefaultCollectorGRPCPort is the default gRPC port of the collector.
-	DefaultCollectorGRPCPort uint16 = 4317
-	// DefaultCollectorHTTPPort is the default HTTP port of the collector.
-	DefaultCollectorHTTPPort uint16 = 4318
-	// DefaultCollectorHost is the host address the Exporter will attempt
-	// connect to if no collector address is provided.
-	DefaultCollectorHost string = "localhost"
-)
-
-// Compression describes the compression used for payloads sent to the
-// collector.
-type Compression int
-
-const (
-	// NoCompression tells the driver to send payloads without
-	// compression.
-	NoCompression Compression = iota
-	// GzipCompression tells the driver to send payloads after
-	// compressing them with gzip.
-	GzipCompression
-)
-
-// RetrySettings defines configuration for retrying batches in case of export failure
-// using an exponential backoff.
-type RetrySettings struct {
-	// Enabled indicates whether to not retry sending batches in case of export failure.
-	Enabled bool
-	// InitialInterval the time to wait after the first failure before retrying.
-	InitialInterval time.Duration
-	// MaxInterval is the upper bound on backoff interval. Once this value is reached the delay between
-	// consecutive retries will always be `MaxInterval`.
-	MaxInterval time.Duration
-	// MaxElapsedTime is the maximum amount of time (including retries) spent trying to send a request/batch.
-	// Once this value is reached, the data is discarded.
-	MaxElapsedTime time.Duration
-}
diff --git a/exporters/otlp/otlpmetric/internal/oconf/tls.go b/exporters/otlp/otlpmetric/internal/oconf/tls.go
deleted file mode 100644
index 44bbe326860..00000000000
--- a/exporters/otlp/otlpmetric/internal/oconf/tls.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"errors"
-	"os"
-)
-
-// ReadTLSConfigFromFile reads a PEM certificate file and creates
-// a tls.Config that will use this certifate to verify a server certificate.
-func ReadTLSConfigFromFile(path string) (*tls.Config, error) {
-	b, err := os.ReadFile(path)
-	if err != nil {
-		return nil, err
-	}
-
-	return CreateTLSConfig(b)
-}
-
-// CreateTLSConfig creates a tls.Config from a raw certificate bytes
-// to verify a server certificate.
-func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
-	cp := x509.NewCertPool()
-	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
-		return nil, errors.New("failed to append certificate to the cert pool")
-	}
-
-	return &tls.Config{
-		RootCAs: cp,
-	}, nil
-}
diff --git a/exporters/otlp/otlpmetric/internal/otest/client.go b/exporters/otlp/otlpmetric/internal/otest/client.go
deleted file mode 100644
index 2200413e49c..00000000000
--- a/exporters/otlp/otlpmetric/internal/otest/client.go
+++ /dev/null
@@ -1,334 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package otest provides common testing utilities for all otlpmetric
-// exporters.
-//
-// Deprecated: package otest exists for historical compatibility, it should not
-// be used.
-package otest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest"
-
-import (
-	"context"
-	"fmt"
-	"testing"
-	"time"
-
-	"github.com/google/go-cmp/cmp"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-	"google.golang.org/protobuf/proto"
-
-	"go.opentelemetry.io/otel"
-	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" // nolint: staticcheck  // Atomic deprecation.
-	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
-	collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
-	cpb "go.opentelemetry.io/proto/otlp/common/v1"
-	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
-	rpb "go.opentelemetry.io/proto/otlp/resource/v1"
-)
-
-var (
-	// Sat Jan 01 2000 00:00:00 GMT+0000.
-	start = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0))
-	end   = start.Add(30 * time.Second)
-
-	kvAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
-		Value: &cpb.AnyValue_StringValue{StringValue: "alice"},
-	}}
-	kvBob = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
-		Value: &cpb.AnyValue_StringValue{StringValue: "bob"},
-	}}
-	kvSrvName = &cpb.KeyValue{Key: "service.name", Value: &cpb.AnyValue{
-		Value: &cpb.AnyValue_StringValue{StringValue: "test server"},
-	}}
-	kvSrvVer = &cpb.KeyValue{Key: "service.version", Value: &cpb.AnyValue{
-		Value: &cpb.AnyValue_StringValue{StringValue: "v0.1.0"},
-	}}
-
-	min, max, sum = 2.0, 4.0, 90.0
-	hdp           = []*mpb.HistogramDataPoint{{
-		Attributes:        []*cpb.KeyValue{kvAlice},
-		StartTimeUnixNano: uint64(start.UnixNano()),
-		TimeUnixNano:      uint64(end.UnixNano()),
-		Count:             30,
-		Sum:               &sum,
-		ExplicitBounds:    []float64{1, 5},
-		BucketCounts:      []uint64{0, 30, 0},
-		Min:               &min,
-		Max:               &max,
-	}}
-
-	hist = &mpb.Histogram{
-		AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
-		DataPoints:             hdp,
-	}
-
-	dPtsInt64 = []*mpb.NumberDataPoint{
-		{
-			Attributes:        []*cpb.KeyValue{kvAlice},
-			StartTimeUnixNano: uint64(start.UnixNano()),
-			TimeUnixNano:      uint64(end.UnixNano()),
-			Value:             &mpb.NumberDataPoint_AsInt{AsInt: 1},
-		},
-		{
-			Attributes:        []*cpb.KeyValue{kvBob},
-			StartTimeUnixNano: uint64(start.UnixNano()),
-			TimeUnixNano:      uint64(end.UnixNano()),
-			Value:             &mpb.NumberDataPoint_AsInt{AsInt: 2},
-		},
-	}
-	dPtsFloat64 = []*mpb.NumberDataPoint{
-		{
-			Attributes:        []*cpb.KeyValue{kvAlice},
-			StartTimeUnixNano: uint64(start.UnixNano()),
-			TimeUnixNano:      uint64(end.UnixNano()),
-			Value:             &mpb.NumberDataPoint_AsDouble{AsDouble: 1.0},
-		},
-		{
-			Attributes:        []*cpb.KeyValue{kvBob},
-			StartTimeUnixNano: uint64(start.UnixNano()),
-			TimeUnixNano:      uint64(end.UnixNano()),
-			Value:             &mpb.NumberDataPoint_AsDouble{AsDouble: 2.0},
-		},
-	}
-
-	sumInt64 = &mpb.Sum{
-		AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
-		IsMonotonic:            true,
-		DataPoints:             dPtsInt64,
-	}
-	sumFloat64 = &mpb.Sum{
-		AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
-		IsMonotonic:            false,
-		DataPoints:             dPtsFloat64,
-	}
-
-	gaugeInt64   = &mpb.Gauge{DataPoints: dPtsInt64}
-	gaugeFloat64 = &mpb.Gauge{DataPoints: dPtsFloat64}
-
-	metrics = []*mpb.Metric{
-		{
-			Name:        "int64-gauge",
-			Description: "Gauge with int64 values",
-			Unit:        "1",
-			Data:        &mpb.Metric_Gauge{Gauge: gaugeInt64},
-		},
-		{
-			Name:        "float64-gauge",
-			Description: "Gauge with float64 values",
-			Unit:        "1",
-			Data:        &mpb.Metric_Gauge{Gauge: gaugeFloat64},
-		},
-		{
-			Name:        "int64-sum",
-			Description: "Sum with int64 values",
-			Unit:        "1",
-			Data:        &mpb.Metric_Sum{Sum: sumInt64},
-		},
-		{
-			Name:        "float64-sum",
-			Description: "Sum with float64 values",
-			Unit:        "1",
-			Data:        &mpb.Metric_Sum{Sum: sumFloat64},
-		},
-		{
-			Name:        "histogram",
-			Description: "Histogram",
-			Unit:        "1",
-			Data:        &mpb.Metric_Histogram{Histogram: hist},
-		},
-	}
-
-	scope = &cpb.InstrumentationScope{
-		Name:    "test/code/path",
-		Version: "v0.1.0",
-	}
-	scopeMetrics = []*mpb.ScopeMetrics{{
-		Scope:     scope,
-		Metrics:   metrics,
-		SchemaUrl: semconv.SchemaURL,
-	}}
-
-	res = &rpb.Resource{
-		Attributes: []*cpb.KeyValue{kvSrvName, kvSrvVer},
-	}
-	resourceMetrics = &mpb.ResourceMetrics{
-		Resource:     res,
-		ScopeMetrics: scopeMetrics,
-		SchemaUrl:    semconv.SchemaURL,
-	}
-)
-
-// ClientFactory is a function that when called returns a
-// internal.Client implementation that is connected to also returned
-// Collector implementation. The Client is ready to upload metric data to the
-// Collector which is ready to store that data.
-//
-// If resultCh is not nil, the returned Collector needs to use the responses
-// from that channel to send back to the client for every export request.
-type ClientFactory func(resultCh <-chan ExportResult) (internal.Client, Collector)
-
-// RunClientTests runs a suite of Client integration tests. For example:
-//
-//	t.Run("Integration", RunClientTests(factory))
-func RunClientTests(f ClientFactory) func(*testing.T) {
-	return func(t *testing.T) {
-		t.Run("ClientHonorsContextErrors", func(t *testing.T) {
-			t.Run("Shutdown", testCtxErrs(func() func(context.Context) error {
-				c, _ := f(nil)
-				return c.Shutdown
-			}))
-
-			t.Run("ForceFlush", testCtxErrs(func() func(context.Context) error {
-				c, _ := f(nil)
-				return c.ForceFlush
-			}))
-
-			t.Run("UploadMetrics", testCtxErrs(func() func(context.Context) error {
-				c, _ := f(nil)
-				return func(ctx context.Context) error {
-					return c.UploadMetrics(ctx, nil)
-				}
-			}))
-		})
-
-		t.Run("ForceFlushFlushes", func(t *testing.T) {
-			ctx := context.Background()
-			client, collector := f(nil)
-			require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
-
-			require.NoError(t, client.ForceFlush(ctx))
-			rm := collector.Collect().Dump()
-			// Data correctness is not important, just it was received.
-			require.Greater(t, len(rm), 0, "no data uploaded")
-
-			require.NoError(t, client.Shutdown(ctx))
-			rm = collector.Collect().Dump()
-			assert.Len(t, rm, 0, "client did not flush all data")
-		})
-
-		t.Run("UploadMetrics", func(t *testing.T) {
-			ctx := context.Background()
-			client, coll := f(nil)
-
-			require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
-			require.NoError(t, client.Shutdown(ctx))
-			got := coll.Collect().Dump()
-			require.Len(t, got, 1, "upload of one ResourceMetrics")
-			diff := cmp.Diff(got[0], resourceMetrics, cmp.Comparer(proto.Equal))
-			if diff != "" {
-				t.Fatalf("unexpected ResourceMetrics:\n%s", diff)
-			}
-		})
-
-		t.Run("PartialSuccess", func(t *testing.T) {
-			const n, msg = 2, "bad data"
-			rCh := make(chan ExportResult, 3)
-			rCh <- ExportResult{
-				Response: &collpb.ExportMetricsServiceResponse{
-					PartialSuccess: &collpb.ExportMetricsPartialSuccess{
-						RejectedDataPoints: n,
-						ErrorMessage:       msg,
-					},
-				},
-			}
-			rCh <- ExportResult{
-				Response: &collpb.ExportMetricsServiceResponse{
-					PartialSuccess: &collpb.ExportMetricsPartialSuccess{
-						// Should not be logged.
-						RejectedDataPoints: 0,
-						ErrorMessage:       "",
-					},
-				},
-			}
-			rCh <- ExportResult{
-				Response: &collpb.ExportMetricsServiceResponse{},
-			}
-
-			ctx := context.Background()
-			client, _ := f(rCh)
-
-			defer func(orig otel.ErrorHandler) {
-				otel.SetErrorHandler(orig)
-			}(otel.GetErrorHandler())
-
-			errs := []error{}
-			eh := otel.ErrorHandlerFunc(func(e error) { errs = append(errs, e) })
-			otel.SetErrorHandler(eh)
-
-			require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
-			require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
-			require.NoError(t, client.UploadMetrics(ctx, resourceMetrics))
-			require.NoError(t, client.Shutdown(ctx))
-
-			require.Equal(t, 1, len(errs))
-			want := fmt.Sprintf("%s (%d metric data points rejected)", msg, n)
-			assert.ErrorContains(t, errs[0], want)
-		})
-
-		t.Run("Other HTTP success", func(t *testing.T) {
-			for code := 201; code <= 299; code++ {
-				t.Run(fmt.Sprintf("status_%d", code), func(t *testing.T) {
-					rCh := make(chan ExportResult, 1)
-					rCh <- ExportResult{
-						ResponseStatus: code,
-					}
-
-					ctx := context.Background()
-					client, _ := f(rCh)
-					defer func() {
-						assert.NoError(t, client.Shutdown(ctx))
-					}()
-
-					defer func(orig otel.ErrorHandler) {
-						otel.SetErrorHandler(orig)
-					}(otel.GetErrorHandler())
-
-					errs := []error{}
-					eh := otel.ErrorHandlerFunc(func(e error) { errs = append(errs, e) })
-					otel.SetErrorHandler(eh)
-
-					assert.NoError(t, client.UploadMetrics(ctx, nil))
-					assert.Equal(t, 0, len(errs))
-				})
-			}
-		})
-	}
-}
-
-func testCtxErrs(factory func() func(context.Context) error) func(t *testing.T) {
-	return func(t *testing.T) {
-		t.Helper()
-		ctx, cancel := context.WithCancel(context.Background())
-		t.Cleanup(cancel)
-
-		t.Run("DeadlineExceeded", func(t *testing.T) {
-			innerCtx, innerCancel := context.WithTimeout(ctx, time.Nanosecond)
-			t.Cleanup(innerCancel)
-			<-innerCtx.Done()
-
-			f := factory()
-			assert.ErrorIs(t, f(innerCtx), context.DeadlineExceeded)
-		})
-
-		t.Run("Canceled", func(t *testing.T) {
-			innerCtx, innerCancel := context.WithCancel(ctx)
-			innerCancel()
-
-			f := factory()
-			assert.ErrorIs(t, f(innerCtx), context.Canceled)
-		})
-	}
-}
diff --git a/exporters/otlp/otlpmetric/internal/otest/client_test.go b/exporters/otlp/otlpmetric/internal/otest/client_test.go
deleted file mode 100644
index 3f7bb848e2c..00000000000
--- a/exporters/otlp/otlpmetric/internal/otest/client_test.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest"
-
-import (
-	"context"
-	"testing"
-
-	"go.opentelemetry.io/otel"
-	"go.opentelemetry.io/otel/exporters/otlp/internal"                       // nolint: staticcheck  // Synchronous deprecation.
-	ominternal "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal" // nolint: staticcheck  // Atomic deprecation.
-	"go.opentelemetry.io/otel/sdk/metric"
-	"go.opentelemetry.io/otel/sdk/metric/metricdata"
-	cpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
-	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
-)
-
-type client struct {
-	rCh     <-chan ExportResult
-	storage *Storage
-}
-
-func (c *client) Temporality(k metric.InstrumentKind) metricdata.Temporality {
-	return metric.DefaultTemporalitySelector(k)
-}
-
-func (c *client) Aggregation(k metric.InstrumentKind) metric.Aggregation {
-	return metric.DefaultAggregationSelector(k)
-}
-
-func (c *client) Collect() *Storage {
-	return c.storage
-}
-
-func (c *client) UploadMetrics(ctx context.Context, rm *mpb.ResourceMetrics) error {
-	c.storage.Add(&cpb.ExportMetricsServiceRequest{
-		ResourceMetrics: []*mpb.ResourceMetrics{rm},
-	})
-	if c.rCh != nil {
-		r := <-c.rCh
-		if r.Response != nil && r.Response.GetPartialSuccess() != nil {
-			msg := r.Response.GetPartialSuccess().GetErrorMessage()
-			n := r.Response.GetPartialSuccess().GetRejectedDataPoints()
-			if msg != "" || n > 0 {
-				otel.Handle(internal.MetricPartialSuccessError(n, msg))
-			}
-		}
-		return r.Err
-	}
-	return ctx.Err()
-}
-
-func (c *client) ForceFlush(ctx context.Context) error { return ctx.Err() }
-func (c *client) Shutdown(ctx context.Context) error   { return ctx.Err() }
-
-func TestClientTests(t *testing.T) {
-	factory := func(rCh <-chan ExportResult) (ominternal.Client, Collector) {
-		c := &client{rCh: rCh, storage: NewStorage()}
-		return c, c
-	}
-
-	t.Run("Integration", RunClientTests(factory))
-}
diff --git a/exporters/otlp/otlpmetric/internal/otest/collector.go b/exporters/otlp/otlpmetric/internal/otest/collector.go
deleted file mode 100644
index b31e308c965..00000000000
--- a/exporters/otlp/otlpmetric/internal/otest/collector.go
+++ /dev/null
@@ -1,440 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest"
-
-import (
-	"bytes"
-	"compress/gzip"
-	"context"
-	"crypto/ecdsa"
-	"crypto/elliptic"
-	"crypto/rand"
-	"crypto/tls"
-	"crypto/x509"
-	"crypto/x509/pkix" // nolint:depguard  // This is for testing.
-	"encoding/pem"
-	"errors"
-	"fmt"
-	"io"
-	"math/big"
-	"net"
-	"net/http"
-	"net/url"
-	"sync"
-	"time"
-
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/metadata"
-	"google.golang.org/protobuf/proto"
-
-	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf" // nolint: staticcheck  // Atomic deprecation.
-	collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
-	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
-)
-
-// Collector is the collection target a Client sends metric uploads to.
-type Collector interface {
-	Collect() *Storage
-}
-
-type ExportResult struct {
-	Response       *collpb.ExportMetricsServiceResponse
-	ResponseStatus int
-	Err            error
-}
-
-// Storage stores uploaded OTLP metric data in their proto form.
-type Storage struct {
-	dataMu sync.Mutex
-	data   []*mpb.ResourceMetrics
-}
-
-// NewStorage returns a configure storage ready to store received requests.
-func NewStorage() *Storage {
-	return &Storage{}
-}
-
-// Add adds the request to the Storage.
-func (s *Storage) Add(request *collpb.ExportMetricsServiceRequest) {
-	s.dataMu.Lock()
-	defer s.dataMu.Unlock()
-	s.data = append(s.data, request.ResourceMetrics...)
-}
-
-// Dump returns all added ResourceMetrics and clears the storage.
-func (s *Storage) Dump() []*mpb.ResourceMetrics {
-	s.dataMu.Lock()
-	defer s.dataMu.Unlock()
-
-	var data []*mpb.ResourceMetrics
-	data, s.data = s.data, []*mpb.ResourceMetrics{}
-	return data
-}
-
-// GRPCCollector is an OTLP gRPC server that collects all requests it receives.
-type GRPCCollector struct {
-	collpb.UnimplementedMetricsServiceServer
-
-	headersMu sync.Mutex
-	headers   metadata.MD
-	storage   *Storage
-
-	resultCh <-chan ExportResult
-	listener net.Listener
-	srv      *grpc.Server
-}
-
-// NewGRPCCollector returns a *GRPCCollector that is listening at the provided
-// endpoint.
-//
-// If endpoint is an empty string, the returned collector will be listening on
-// the localhost interface at an OS chosen port.
-//
-// If errCh is not nil, the collector will respond to Export calls with errors
-// sent on that channel. This means that if errCh is not nil Export calls will
-// block until an error is received.
-func NewGRPCCollector(endpoint string, resultCh <-chan ExportResult) (*GRPCCollector, error) {
-	if endpoint == "" {
-		endpoint = "localhost:0"
-	}
-
-	c := &GRPCCollector{
-		storage:  NewStorage(),
-		resultCh: resultCh,
-	}
-
-	var err error
-	c.listener, err = net.Listen("tcp", endpoint)
-	if err != nil {
-		return nil, err
-	}
-
-	c.srv = grpc.NewServer()
-	collpb.RegisterMetricsServiceServer(c.srv, c)
-	go func() { _ = c.srv.Serve(c.listener) }()
-
-	return c, nil
-}
-
-// Shutdown shuts down the gRPC server closing all open connections and
-// listeners immediately.
-func (c *GRPCCollector) Shutdown() { c.srv.Stop() }
-
-// Addr returns the net.Addr c is listening at.
-func (c *GRPCCollector) Addr() net.Addr {
-	return c.listener.Addr()
-}
-
-// Collect returns the Storage holding all collected requests.
-func (c *GRPCCollector) Collect() *Storage {
-	return c.storage
-}
-
-// Headers returns the headers received for all requests.
-func (c *GRPCCollector) Headers() map[string][]string {
-	// Makes a copy.
-	c.headersMu.Lock()
-	defer c.headersMu.Unlock()
-	return metadata.Join(c.headers)
-}
-
-// Export handles the export req.
-func (c *GRPCCollector) Export(ctx context.Context, req *collpb.ExportMetricsServiceRequest) (*collpb.ExportMetricsServiceResponse, error) {
-	c.storage.Add(req)
-
-	if h, ok := metadata.FromIncomingContext(ctx); ok {
-		c.headersMu.Lock()
-		c.headers = metadata.Join(c.headers, h)
-		c.headersMu.Unlock()
-	}
-
-	if c.resultCh != nil {
-		r := <-c.resultCh
-		if r.Response == nil {
-			return &collpb.ExportMetricsServiceResponse{}, r.Err
-		}
-		return r.Response, r.Err
-	}
-	return &collpb.ExportMetricsServiceResponse{}, nil
-}
-
-var emptyExportMetricsServiceResponse = func() []byte {
-	body := collpb.ExportMetricsServiceResponse{}
-	r, err := proto.Marshal(&body)
-	if err != nil {
-		panic(err)
-	}
-	return r
-}()
-
-type HTTPResponseError struct {
-	Err    error
-	Status int
-	Header http.Header
-}
-
-func (e *HTTPResponseError) Error() string {
-	return fmt.Sprintf("%d: %s", e.Status, e.Err)
-}
-
-func (e *HTTPResponseError) Unwrap() error { return e.Err }
-
-// HTTPCollector is an OTLP HTTP server that collects all requests it receives.
-type HTTPCollector struct {
-	headersMu sync.Mutex
-	headers   http.Header
-	storage   *Storage
-
-	resultCh <-chan ExportResult
-	listener net.Listener
-	srv      *http.Server
-}
-
-// NewHTTPCollector returns a *HTTPCollector that is listening at the provided
-// endpoint.
-//
-// If endpoint is an empty string, the returned collector will be listening on
-// the localhost interface at an OS chosen port, not use TLS, and listen at the
-// default OTLP metric endpoint path ("/v1/metrics"). If the endpoint contains
-// a prefix of "https" the server will generate weak self-signed TLS
-// certificates and use them to server data. If the endpoint contains a path,
-// that path will be used instead of the default OTLP metric endpoint path.
-//
-// If errCh is not nil, the collector will respond to HTTP requests with errors
-// sent on that channel. This means that if errCh is not nil Export calls will
-// block until an error is received.
-func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult) (*HTTPCollector, error) {
-	u, err := url.Parse(endpoint)
-	if err != nil {
-		return nil, err
-	}
-	if u.Host == "" {
-		u.Host = "localhost:0"
-	}
-	if u.Path == "" {
-		u.Path = oconf.DefaultMetricsPath
-	}
-
-	c := &HTTPCollector{
-		headers:  http.Header{},
-		storage:  NewStorage(),
-		resultCh: resultCh,
-	}
-
-	c.listener, err = net.Listen("tcp", u.Host)
-	if err != nil {
-		return nil, err
-	}
-
-	mux := http.NewServeMux()
-	mux.Handle(u.Path, http.HandlerFunc(c.handler))
-	c.srv = &http.Server{Handler: mux}
-	if u.Scheme == "https" {
-		cert, err := weakCertificate()
-		if err != nil {
-			return nil, err
-		}
-		c.srv.TLSConfig = &tls.Config{
-			Certificates: []tls.Certificate{cert},
-		}
-		go func() { _ = c.srv.ServeTLS(c.listener, "", "") }()
-	} else {
-		go func() { _ = c.srv.Serve(c.listener) }()
-	}
-	return c, nil
-}
-
-// Shutdown shuts down the HTTP server closing all open connections and
-// listeners.
-func (c *HTTPCollector) Shutdown(ctx context.Context) error {
-	return c.srv.Shutdown(ctx)
-}
-
-// Addr returns the net.Addr c is listening at.
-func (c *HTTPCollector) Addr() net.Addr {
-	return c.listener.Addr()
-}
-
-// Collect returns the Storage holding all collected requests.
-func (c *HTTPCollector) Collect() *Storage {
-	return c.storage
-}
-
-// Headers returns the headers received for all requests.
-func (c *HTTPCollector) Headers() map[string][]string {
-	// Makes a copy.
-	c.headersMu.Lock()
-	defer c.headersMu.Unlock()
-	return c.headers.Clone()
-}
-
-func (c *HTTPCollector) handler(w http.ResponseWriter, r *http.Request) {
-	c.respond(w, c.record(r))
-}
-
-func (c *HTTPCollector) record(r *http.Request) ExportResult {
-	// Currently only supports protobuf.
-	if v := r.Header.Get("Content-Type"); v != "application/x-protobuf" {
-		err := fmt.Errorf("content-type not supported: %s", v)
-		return ExportResult{Err: err}
-	}
-
-	body, err := c.readBody(r)
-	if err != nil {
-		return ExportResult{Err: err}
-	}
-	pbRequest := &collpb.ExportMetricsServiceRequest{}
-	err = proto.Unmarshal(body, pbRequest)
-	if err != nil {
-		return ExportResult{
-			Err: &HTTPResponseError{
-				Err:    err,
-				Status: http.StatusInternalServerError,
-			},
-		}
-	}
-	c.storage.Add(pbRequest)
-
-	c.headersMu.Lock()
-	for k, vals := range r.Header {
-		for _, v := range vals {
-			c.headers.Add(k, v)
-		}
-	}
-	c.headersMu.Unlock()
-
-	if c.resultCh != nil {
-		return <-c.resultCh
-	}
-	return ExportResult{Err: err}
-}
-
-func (c *HTTPCollector) readBody(r *http.Request) (body []byte, err error) {
-	var reader io.ReadCloser
-	switch r.Header.Get("Content-Encoding") {
-	case "gzip":
-		reader, err = gzip.NewReader(r.Body)
-		if err != nil {
-			_ = reader.Close()
-			return nil, &HTTPResponseError{
-				Err:    err,
-				Status: http.StatusInternalServerError,
-			}
-		}
-	default:
-		reader = r.Body
-	}
-
-	defer func() {
-		cErr := reader.Close()
-		if err == nil && cErr != nil {
-			err = &HTTPResponseError{
-				Err:    cErr,
-				Status: http.StatusInternalServerError,
-			}
-		}
-	}()
-	body, err = io.ReadAll(reader)
-	if err != nil {
-		err = &HTTPResponseError{
-			Err:    err,
-			Status: http.StatusInternalServerError,
-		}
-	}
-	return body, err
-}
-
-func (c *HTTPCollector) respond(w http.ResponseWriter, resp ExportResult) {
-	if resp.Err != nil {
-		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
-		w.Header().Set("X-Content-Type-Options", "nosniff")
-		var e *HTTPResponseError
-		if errors.As(resp.Err, &e) {
-			for k, vals := range e.Header {
-				for _, v := range vals {
-					w.Header().Add(k, v)
-				}
-			}
-			w.WriteHeader(e.Status)
-			fmt.Fprintln(w, e.Error())
-		} else {
-			w.WriteHeader(http.StatusBadRequest)
-			fmt.Fprintln(w, resp.Err.Error())
-		}
-		return
-	}
-
-	w.Header().Set("Content-Type", "application/x-protobuf")
-	if resp.ResponseStatus != 0 {
-		w.WriteHeader(resp.ResponseStatus)
-	} else {
-		w.WriteHeader(http.StatusOK)
-	}
-	if resp.Response == nil {
-		_, _ = w.Write(emptyExportMetricsServiceResponse)
-	} else {
-		r, err := proto.Marshal(resp.Response)
-		if err != nil {
-			panic(err)
-		}
-		_, _ = w.Write(r)
-	}
-}
-
-// Based on https://golang.org/src/crypto/tls/generate_cert.go,
-// simplified and weakened.
-func weakCertificate() (tls.Certificate, error) {
-	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
-	if err != nil {
-		return tls.Certificate{}, err
-	}
-	notBefore := time.Now()
-	notAfter := notBefore.Add(time.Hour)
-	max := new(big.Int).Lsh(big.NewInt(1), 128)
-	sn, err := rand.Int(rand.Reader, max)
-	if err != nil {
-		return tls.Certificate{}, err
-	}
-	tmpl := x509.Certificate{
-		SerialNumber:          sn,
-		Subject:               pkix.Name{Organization: []string{"otel-go"}},
-		NotBefore:             notBefore,
-		NotAfter:              notAfter,
-		KeyUsage:              x509.KeyUsageDigitalSignature,
-		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
-		BasicConstraintsValid: true,
-		DNSNames:              []string{"localhost"},
-		IPAddresses:           []net.IP{net.IPv6loopback, net.IPv4(127, 0, 0, 1)},
-	}
-	derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv)
-	if err != nil {
-		return tls.Certificate{}, err
-	}
-	var certBuf bytes.Buffer
-	err = pem.Encode(&certBuf, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
-	if err != nil {
-		return tls.Certificate{}, err
-	}
-	privBytes, err := x509.MarshalPKCS8PrivateKey(priv)
-	if err != nil {
-		return tls.Certificate{}, err
-	}
-	var privBuf bytes.Buffer
-	err = pem.Encode(&privBuf, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes})
-	if err != nil {
-		return tls.Certificate{}, err
-	}
-	return tls.X509KeyPair(certBuf.Bytes(), privBuf.Bytes())
-}
diff --git a/exporters/otlp/otlpmetric/internal/transform/attribute.go b/exporters/otlp/otlpmetric/internal/transform/attribute.go
deleted file mode 100644
index d382fac3576..00000000000
--- a/exporters/otlp/otlpmetric/internal/transform/attribute.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
-
-import (
-	"go.opentelemetry.io/otel/attribute"
-	cpb "go.opentelemetry.io/proto/otlp/common/v1"
-)
-
-// AttrIter transforms an attribute iterator into OTLP key-values.
-func AttrIter(iter attribute.Iterator) []*cpb.KeyValue {
-	l := iter.Len()
-	if l == 0 {
-		return nil
-	}
-
-	out := make([]*cpb.KeyValue, 0, l)
-	for iter.Next() {
-		out = append(out, KeyValue(iter.Attribute()))
-	}
-	return out
-}
-
-// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
-func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue {
-	if len(attrs) == 0 {
-		return nil
-	}
-
-	out := make([]*cpb.KeyValue, 0, len(attrs))
-	for _, kv := range attrs {
-		out = append(out, KeyValue(kv))
-	}
-	return out
-}
-
-// KeyValue transforms an attribute KeyValue into an OTLP key-value.
-func KeyValue(kv attribute.KeyValue) *cpb.KeyValue {
-	return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
-}
-
-// Value transforms an attribute Value into an OTLP AnyValue.
-func Value(v attribute.Value) *cpb.AnyValue {
-	av := new(cpb.AnyValue)
-	switch v.Type() {
-	case attribute.BOOL:
-		av.Value = &cpb.AnyValue_BoolValue{
-			BoolValue: v.AsBool(),
-		}
-	case attribute.BOOLSLICE:
-		av.Value = &cpb.AnyValue_ArrayValue{
-			ArrayValue: &cpb.ArrayValue{
-				Values: boolSliceValues(v.AsBoolSlice()),
-			},
-		}
-	case attribute.INT64:
-		av.Value = &cpb.AnyValue_IntValue{
-			IntValue: v.AsInt64(),
-		}
-	case attribute.INT64SLICE:
-		av.Value = &cpb.AnyValue_ArrayValue{
-			ArrayValue: &cpb.ArrayValue{
-				Values: int64SliceValues(v.AsInt64Slice()),
-			},
-		}
-	case attribute.FLOAT64:
-		av.Value = &cpb.AnyValue_DoubleValue{
-			DoubleValue: v.AsFloat64(),
-		}
-	case attribute.FLOAT64SLICE:
-		av.Value = &cpb.AnyValue_ArrayValue{
-			ArrayValue: &cpb.ArrayValue{
-				Values: float64SliceValues(v.AsFloat64Slice()),
-			},
-		}
-	case attribute.STRING:
-		av.Value = &cpb.AnyValue_StringValue{
-			StringValue: v.AsString(),
-		}
-	case attribute.STRINGSLICE:
-		av.Value = &cpb.AnyValue_ArrayValue{
-			ArrayValue: &cpb.ArrayValue{
-				Values: stringSliceValues(v.AsStringSlice()),
-			},
-		}
-	default:
-		av.Value = &cpb.AnyValue_StringValue{
-			StringValue: "INVALID",
-		}
-	}
-	return av
-}
-
-func boolSliceValues(vals []bool) []*cpb.AnyValue {
-	converted := make([]*cpb.AnyValue, len(vals))
-	for i, v := range vals {
-		converted[i] = &cpb.AnyValue{
-			Value: &cpb.AnyValue_BoolValue{
-				BoolValue: v,
-			},
-		}
-	}
-	return converted
-}
-
-func int64SliceValues(vals []int64) []*cpb.AnyValue {
-	converted := make([]*cpb.AnyValue, len(vals))
-	for i, v := range vals {
-		converted[i] = &cpb.AnyValue{
-			Value: &cpb.AnyValue_IntValue{
-				IntValue: v,
-			},
-		}
-	}
-	return converted
-}
-
-func float64SliceValues(vals []float64) []*cpb.AnyValue {
-	converted := make([]*cpb.AnyValue, len(vals))
-	for i, v := range vals {
-		converted[i] = &cpb.AnyValue{
-			Value: &cpb.AnyValue_DoubleValue{
-				DoubleValue: v,
-			},
-		}
-	}
-	return converted
-}
-
-func stringSliceValues(vals []string) []*cpb.AnyValue {
-	converted := make([]*cpb.AnyValue, len(vals))
-	for i, v := range vals {
-		converted[i] = &cpb.AnyValue{
-			Value: &cpb.AnyValue_StringValue{
-				StringValue: v,
-			},
-		}
-	}
-	return converted
-}
diff --git a/exporters/otlp/otlpmetric/internal/transform/attribute_test.go b/exporters/otlp/otlpmetric/internal/transform/attribute_test.go
deleted file mode 100644
index 1dbe674951c..00000000000
--- a/exporters/otlp/otlpmetric/internal/transform/attribute_test.go
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-
-	"go.opentelemetry.io/otel/attribute"
-	cpb "go.opentelemetry.io/proto/otlp/common/v1"
-)
-
-var (
-	attrBool         = attribute.Bool("bool", true)
-	attrBoolSlice    = attribute.BoolSlice("bool slice", []bool{true, false})
-	attrInt          = attribute.Int("int", 1)
-	attrIntSlice     = attribute.IntSlice("int slice", []int{-1, 1})
-	attrInt64        = attribute.Int64("int64", 1)
-	attrInt64Slice   = attribute.Int64Slice("int64 slice", []int64{-1, 1})
-	attrFloat64      = attribute.Float64("float64", 1)
-	attrFloat64Slice = attribute.Float64Slice("float64 slice", []float64{-1, 1})
-	attrString       = attribute.String("string", "o")
-	attrStringSlice  = attribute.StringSlice("string slice", []string{"o", "n"})
-	attrInvalid      = attribute.KeyValue{
-		Key:   attribute.Key("invalid"),
-		Value: attribute.Value{},
-	}
-
-	valBoolTrue  = &cpb.AnyValue{Value: &cpb.AnyValue_BoolValue{BoolValue: true}}
-	valBoolFalse = &cpb.AnyValue{Value: &cpb.AnyValue_BoolValue{BoolValue: false}}
-	valBoolSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{
-		ArrayValue: &cpb.ArrayValue{
-			Values: []*cpb.AnyValue{valBoolTrue, valBoolFalse},
-		},
-	}}
-	valIntOne   = &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: 1}}
-	valIntNOne  = &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: -1}}
-	valIntSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{
-		ArrayValue: &cpb.ArrayValue{
-			Values: []*cpb.AnyValue{valIntNOne, valIntOne},
-		},
-	}}
-	valDblOne   = &cpb.AnyValue{Value: &cpb.AnyValue_DoubleValue{DoubleValue: 1}}
-	valDblNOne  = &cpb.AnyValue{Value: &cpb.AnyValue_DoubleValue{DoubleValue: -1}}
-	valDblSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{
-		ArrayValue: &cpb.ArrayValue{
-			Values: []*cpb.AnyValue{valDblNOne, valDblOne},
-		},
-	}}
-	valStrO     = &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "o"}}
-	valStrN     = &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "n"}}
-	valStrSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{
-		ArrayValue: &cpb.ArrayValue{
-			Values: []*cpb.AnyValue{valStrO, valStrN},
-		},
-	}}
-
-	kvBool         = &cpb.KeyValue{Key: "bool", Value: valBoolTrue}
-	kvBoolSlice    = &cpb.KeyValue{Key: "bool slice", Value: valBoolSlice}
-	kvInt          = &cpb.KeyValue{Key: "int", Value: valIntOne}
-	kvIntSlice     = &cpb.KeyValue{Key: "int slice", Value: valIntSlice}
-	kvInt64        = &cpb.KeyValue{Key: "int64", Value: valIntOne}
-	kvInt64Slice   = &cpb.KeyValue{Key: "int64 slice", Value: valIntSlice}
-	kvFloat64      = &cpb.KeyValue{Key: "float64", Value: valDblOne}
-	kvFloat64Slice = &cpb.KeyValue{Key: "float64 slice", Value: valDblSlice}
-	kvString       = &cpb.KeyValue{Key: "string", Value: valStrO}
-	kvStringSlice  = &cpb.KeyValue{Key: "string slice", Value: valStrSlice}
-	kvInvalid      = &cpb.KeyValue{
-		Key: "invalid",
-		Value: &cpb.AnyValue{
-			Value: &cpb.AnyValue_StringValue{StringValue: "INVALID"},
-		},
-	}
-)
-
-type attributeTest struct {
-	name string
-	in   []attribute.KeyValue
-	want []*cpb.KeyValue
-}
-
-func TestAttributeTransforms(t *testing.T) {
-	for _, test := range []attributeTest{
-		{"nil", nil, nil},
-		{"empty", []attribute.KeyValue{}, nil},
-		{
-			"invalid",
-			[]attribute.KeyValue{attrInvalid},
-			[]*cpb.KeyValue{kvInvalid},
-		},
-		{
-			"bool",
-			[]attribute.KeyValue{attrBool},
-			[]*cpb.KeyValue{kvBool},
-		},
-		{
-			"bool slice",
-			[]attribute.KeyValue{attrBoolSlice},
-			[]*cpb.KeyValue{kvBoolSlice},
-		},
-		{
-			"int",
-			[]attribute.KeyValue{attrInt},
-			[]*cpb.KeyValue{kvInt},
-		},
-		{
-			"int slice",
-			[]attribute.KeyValue{attrIntSlice},
-			[]*cpb.KeyValue{kvIntSlice},
-		},
-		{
-			"int64",
-			[]attribute.KeyValue{attrInt64},
-			[]*cpb.KeyValue{kvInt64},
-		},
-		{
-			"int64 slice",
-			[]attribute.KeyValue{attrInt64Slice},
-			[]*cpb.KeyValue{kvInt64Slice},
-		},
-		{
-			"float64",
-			[]attribute.KeyValue{attrFloat64},
-			[]*cpb.KeyValue{kvFloat64},
-		},
-		{
-			"float64 slice",
-			[]attribute.KeyValue{attrFloat64Slice},
-			[]*cpb.KeyValue{kvFloat64Slice},
-		},
-		{
-			"string",
-			[]attribute.KeyValue{attrString},
-			[]*cpb.KeyValue{kvString},
-		},
-		{
-			"string slice",
-			[]attribute.KeyValue{attrStringSlice},
-			[]*cpb.KeyValue{kvStringSlice},
-		},
-		{
-			"all",
-			[]attribute.KeyValue{
-				attrBool,
-				attrBoolSlice,
-				attrInt,
-				attrIntSlice,
-				attrInt64,
-				attrInt64Slice,
-				attrFloat64,
-				attrFloat64Slice,
-				attrString,
-				attrStringSlice,
-				attrInvalid,
-			},
-			[]*cpb.KeyValue{
-				kvBool,
-				kvBoolSlice,
-				kvInt,
-				kvIntSlice,
-				kvInt64,
-				kvInt64Slice,
-				kvFloat64,
-				kvFloat64Slice,
-				kvString,
-				kvStringSlice,
-				kvInvalid,
-			},
-		},
-	} {
-		t.Run(test.name, func(t *testing.T) {
-			t.Run("KeyValues", func(t *testing.T) {
-				assert.ElementsMatch(t, test.want, KeyValues(test.in))
-			})
-			t.Run("AttrIter", func(t *testing.T) {
-				s := attribute.NewSet(test.in...)
-				assert.ElementsMatch(t, test.want, AttrIter(s.Iter()))
-			})
-		})
-	}
-}
diff --git a/exporters/otlp/otlpmetric/internal/transform/doc.go b/exporters/otlp/otlpmetric/internal/transform/doc.go
deleted file mode 100644
index 59c4951e211..00000000000
--- a/exporters/otlp/otlpmetric/internal/transform/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package transform provides transformation functionality from the
-// sdk/metric/metricdata data-types into OTLP data-types.
-//
-// Deprecated: package transform exists for historical compatibility, it should
-// not be used.
-package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
diff --git a/exporters/otlp/otlpmetric/internal/transform/error.go b/exporters/otlp/otlpmetric/internal/transform/error.go
deleted file mode 100644
index d98f8e082c9..00000000000
--- a/exporters/otlp/otlpmetric/internal/transform/error.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-
-	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
-)
-
-var (
-	errUnknownAggregation = errors.New("unknown aggregation")
-	errUnknownTemporality = errors.New("unknown temporality")
-)
-
-type errMetric struct {
-	m   *mpb.Metric
-	err error
-}
-
-func (e errMetric) Unwrap() error {
-	return e.err
-}
-
-func (e errMetric) Error() string {
-	format := "invalid metric (name: %q, description: %q, unit: %q): %s"
-	return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err)
-}
-
-func (e errMetric) Is(target error) bool {
-	return errors.Is(e.err, target)
-}
-
-// multiErr is used by the data-type transform functions to wrap multiple
-// errors into a single return value. The error message will show all errors
-// as a list and scope them by the datatype name that is returning them.
-type multiErr struct {
-	datatype string
-	errs     []error
-}
-
-// errOrNil returns nil if e contains no errors, otherwise it returns e.
-func (e *multiErr) errOrNil() error {
-	if len(e.errs) == 0 {
-		return nil
-	}
-	return e
-}
-
-// append adds err to e. If err is a multiErr, its errs are flattened into e.
-func (e *multiErr) append(err error) {
-	// Do not use errors.As here, this should only be flattened one layer. If
-	// there is a *multiErr several steps down the chain, all the errors above
-	// it will be discarded if errors.As is used instead.
-	switch other := err.(type) {
-	case *multiErr:
-		// Flatten err errors into e.
-		e.errs = append(e.errs, other.errs...)
-	default:
-		e.errs = append(e.errs, err)
-	}
-}
-
-func (e *multiErr) Error() string {
-	es := make([]string, len(e.errs))
-	for i, err := range e.errs {
-		es[i] = fmt.Sprintf("* %s", err)
-	}
-
-	format := "%d errors occurred transforming %s:\n\t%s"
-	return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t"))
-}
-
-func (e *multiErr) Unwrap() error {
-	switch len(e.errs) {
-	case 0:
-		return nil
-	case 1:
-		return e.errs[0]
-	}
-
-	// Return a multiErr without the leading error.
-	cp := &multiErr{
-		datatype: e.datatype,
-		errs:     make([]error, len(e.errs)-1),
-	}
-	copy(cp.errs, e.errs[1:])
-	return cp
-}
-
-func (e *multiErr) Is(target error) bool {
-	if len(e.errs) == 0 {
-		return false
-	}
-	// Check if the first error is target.
-	return errors.Is(e.errs[0], target)
-}
diff --git a/exporters/otlp/otlpmetric/internal/transform/error_test.go b/exporters/otlp/otlpmetric/internal/transform/error_test.go
deleted file mode 100644
index 4f407c1b7ab..00000000000
--- a/exporters/otlp/otlpmetric/internal/transform/error_test.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
-
-import (
-	"strings"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-var (
-	e0 = errMetric{m: pbMetrics[0], err: errUnknownAggregation}
-	e1 = errMetric{m: pbMetrics[1], err: errUnknownTemporality}
-)
-
-type testingErr struct{}
-
-func (testingErr) Error() string { return "testing error" }
-
-// errFunc is a non-comparable error type.
-type errFunc func() string
-
-func (e errFunc) Error() string {
-	return e()
-}
-
-func TestMultiErr(t *testing.T) {
-	const name = "TestMultiErr"
-	me := &multiErr{datatype: name}
-
-	t.Run("ErrOrNil", func(t *testing.T) {
-		require.Nil(t, me.errOrNil())
-		me.errs = []error{e0}
-		assert.Error(t, me.errOrNil())
-	})
-
-	var testErr testingErr
-	t.Run("AppendError", func(t *testing.T) {
-		me.append(testErr)
-		assert.Equal(t, testErr, me.errs[len(me.errs)-1])
-	})
-
-	t.Run("AppendFlattens", func(t *testing.T) {
-		other := &multiErr{datatype: "OtherTestMultiErr", errs: []error{e1}}
-		me.append(other)
-		assert.Equal(t, e1, me.errs[len(me.errs)-1])
-	})
-
-	t.Run("ErrorMessage", func(t *testing.T) {
-		// Test the overall structure of the message, but not the exact
-		// language so this doesn't become a change-indicator.
-		msg := me.Error()
-		lines := strings.Split(msg, "\n")
-		assert.Equalf(t, 4, len(lines), "expected a 4 line error message, got:\n\n%s", msg)
-		assert.Contains(t, msg, name)
-		assert.Contains(t, msg, e0.Error())
-		assert.Contains(t, msg, testErr.Error())
-		assert.Contains(t, msg, e1.Error())
-	})
-
-	t.Run("ErrorIs", func(t *testing.T) {
-		assert.ErrorIs(t, me, errUnknownAggregation)
-		assert.ErrorIs(t, me, e0)
-		assert.ErrorIs(t, me, testErr)
-		assert.ErrorIs(t, me, errUnknownTemporality)
-		assert.ErrorIs(t, me, e1)
-
-		errUnknown := errFunc(func() string { return "unknown error" })
-		assert.NotErrorIs(t, me, errUnknown)
-
-		var empty multiErr
-		assert.NotErrorIs(t, &empty, errUnknownTemporality)
-	})
-}
diff --git a/exporters/otlp/otlpmetric/internal/transform/metricdata.go b/exporters/otlp/otlpmetric/internal/transform/metricdata.go
deleted file mode 100644
index 4ca2f958fa8..00000000000
--- a/exporters/otlp/otlpmetric/internal/transform/metricdata.go
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
-
-import (
-	"fmt"
-	"time"
-
-	"go.opentelemetry.io/otel/sdk/metric/metricdata"
-	cpb "go.opentelemetry.io/proto/otlp/common/v1"
-	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
-	rpb "go.opentelemetry.io/proto/otlp/resource/v1"
-)
-
-// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm
-// contains invalid ScopeMetrics, an error will be returned along with an OTLP
-// ResourceMetrics that contains partial OTLP ScopeMetrics.
-func ResourceMetrics(rm *metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) {
-	sms, err := ScopeMetrics(rm.ScopeMetrics)
-	return &mpb.ResourceMetrics{
-		Resource: &rpb.Resource{
-			Attributes: AttrIter(rm.Resource.Iter()),
-		},
-		ScopeMetrics: sms,
-		SchemaUrl:    rm.Resource.SchemaURL(),
-	}, err
-}
-
-// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If
-// sms contains invalid metric values, an error will be returned along with a
-// slice that contains partial OTLP ScopeMetrics.
-func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
-	errs := &multiErr{datatype: "ScopeMetrics"}
-	out := make([]*mpb.ScopeMetrics, 0, len(sms))
-	for _, sm := range sms {
-		ms, err := Metrics(sm.Metrics)
-		if err != nil {
-			errs.append(err)
-		}
-
-		out = append(out, &mpb.ScopeMetrics{
-			Scope: &cpb.InstrumentationScope{
-				Name:    sm.Scope.Name,
-				Version: sm.Scope.Version,
-			},
-			Metrics:   ms,
-			SchemaUrl: sm.Scope.SchemaURL,
-		})
-	}
-	return out, errs.errOrNil()
-}
-
-// Metrics returns a slice of OTLP Metric generated from ms. If ms contains
-// invalid metric values, an error will be returned along with a slice that
-// contains partial OTLP Metrics.
-func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) {
-	errs := &multiErr{datatype: "Metrics"}
-	out := make([]*mpb.Metric, 0, len(ms))
-	for _, m := range ms {
-		o, err := metric(m)
-		if err != nil {
-			// Do not include invalid data. Drop the metric, report the error.
-			errs.append(errMetric{m: o, err: err})
-			continue
-		}
-		out = append(out, o)
-	}
-	return out, errs.errOrNil()
-}
-
-func metric(m metricdata.Metrics) (*mpb.Metric, error) {
-	var err error
-	out := &mpb.Metric{
-		Name:        m.Name,
-		Description: m.Description,
-		Unit:        string(m.Unit),
-	}
-	switch a := m.Data.(type) {
-	case metricdata.Gauge[int64]:
-		out.Data = Gauge[int64](a)
-	case metricdata.Gauge[float64]:
-		out.Data = Gauge[float64](a)
-	case metricdata.Sum[int64]:
-		out.Data, err = Sum[int64](a)
-	case metricdata.Sum[float64]:
-		out.Data, err = Sum[float64](a)
-	case metricdata.Histogram[int64]:
-		out.Data, err = Histogram(a)
-	case metricdata.Histogram[float64]:
-		out.Data, err = Histogram(a)
-	case metricdata.ExponentialHistogram[int64]:
-		out.Data, err = ExponentialHistogram(a)
-	case metricdata.ExponentialHistogram[float64]:
-		out.Data, err = ExponentialHistogram(a)
-	default:
-		return out, fmt.Errorf("%w: %T", errUnknownAggregation, a)
-	}
-	return out, err
-}
-
-// Gauge returns an OTLP Metric_Gauge generated from g.
-func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge {
-	return &mpb.Metric_Gauge{
-		Gauge: &mpb.Gauge{
-			DataPoints: DataPoints(g.DataPoints),
-		},
-	}
-}
-
-// Sum returns an OTLP Metric_Sum generated from s. An error is returned
-// if the temporality of s is unknown.
-func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) {
-	t, err := Temporality(s.Temporality)
-	if err != nil {
-		return nil, err
-	}
-	return &mpb.Metric_Sum{
-		Sum: &mpb.Sum{
-			AggregationTemporality: t,
-			IsMonotonic:            s.IsMonotonic,
-			DataPoints:             DataPoints(s.DataPoints),
-		},
-	}, nil
-}
-
-// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts.
-func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint {
-	out := make([]*mpb.NumberDataPoint, 0, len(dPts))
-	for _, dPt := range dPts {
-		ndp := &mpb.NumberDataPoint{
-			Attributes:        AttrIter(dPt.Attributes.Iter()),
-			StartTimeUnixNano: timeUnixNano(dPt.StartTime),
-			TimeUnixNano:      timeUnixNano(dPt.Time),
-		}
-		switch v := any(dPt.Value).(type) {
-		case int64:
-			ndp.Value = &mpb.NumberDataPoint_AsInt{
-				AsInt: v,
-			}
-		case float64:
-			ndp.Value = &mpb.NumberDataPoint_AsDouble{
-				AsDouble: v,
-			}
-		}
-		out = append(out, ndp)
-	}
-	return out
-}
-
-// Histogram returns an OTLP Metric_Histogram generated from h. An error is
-// returned if the temporality of h is unknown.
-func Histogram[N int64 | float64](h metricdata.Histogram[N]) (*mpb.Metric_Histogram, error) {
-	t, err := Temporality(h.Temporality)
-	if err != nil {
-		return nil, err
-	}
-	return &mpb.Metric_Histogram{
-		Histogram: &mpb.Histogram{
-			AggregationTemporality: t,
-			DataPoints:             HistogramDataPoints(h.DataPoints),
-		},
-	}, nil
-}
-
-// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated
-// from dPts.
-func HistogramDataPoints[N int64 | float64](dPts []metricdata.HistogramDataPoint[N]) []*mpb.HistogramDataPoint {
-	out := make([]*mpb.HistogramDataPoint, 0, len(dPts))
-	for _, dPt := range dPts {
-		sum := float64(dPt.Sum)
-		hdp := &mpb.HistogramDataPoint{
-			Attributes:        AttrIter(dPt.Attributes.Iter()),
-			StartTimeUnixNano: timeUnixNano(dPt.StartTime),
-			TimeUnixNano:      timeUnixNano(dPt.Time),
-			Count:             dPt.Count,
-			Sum:               &sum,
-			BucketCounts:      dPt.BucketCounts,
-			ExplicitBounds:    dPt.Bounds,
-		}
-		if v, ok := dPt.Min.Value(); ok {
-			vF64 := float64(v)
-			hdp.Min = &vF64
-		}
-		if v, ok := dPt.Max.Value(); ok {
-			vF64 := float64(v)
-			hdp.Max = &vF64
-		}
-		out = append(out, hdp)
-	}
-	return out
-}
-
-// ExponentialHistogram returns an OTLP Metric_ExponentialHistogram generated from h. An error is
-// returned if the temporality of h is unknown.
-func ExponentialHistogram[N int64 | float64](h metricdata.ExponentialHistogram[N]) (*mpb.Metric_ExponentialHistogram, error) {
-	t, err := Temporality(h.Temporality)
-	if err != nil {
-		return nil, err
-	}
-	return &mpb.Metric_ExponentialHistogram{
-		ExponentialHistogram: &mpb.ExponentialHistogram{
-			AggregationTemporality: t,
-			DataPoints:             ExponentialHistogramDataPoints(h.DataPoints),
-		},
-	}, nil
-}
-
-// ExponentialHistogramDataPoints returns a slice of OTLP ExponentialHistogramDataPoint generated
-// from dPts.
-func ExponentialHistogramDataPoints[N int64 | float64](dPts []metricdata.ExponentialHistogramDataPoint[N]) []*mpb.ExponentialHistogramDataPoint {
-	out := make([]*mpb.ExponentialHistogramDataPoint, 0, len(dPts))
-	for _, dPt := range dPts {
-		sum := float64(dPt.Sum)
-		ehdp := &mpb.ExponentialHistogramDataPoint{
-			Attributes:        AttrIter(dPt.Attributes.Iter()),
-			StartTimeUnixNano: timeUnixNano(dPt.StartTime),
-			TimeUnixNano:      timeUnixNano(dPt.Time),
-			Count:             dPt.Count,
-			Sum:               &sum,
-			Scale:             dPt.Scale,
-			ZeroCount:         dPt.ZeroCount,
-
-			Positive: ExponentialHistogramDataPointBuckets(dPt.PositiveBucket),
-			Negative: ExponentialHistogramDataPointBuckets(dPt.NegativeBucket),
-		}
-		if v, ok := dPt.Min.Value(); ok {
-			vF64 := float64(v)
-			ehdp.Min = &vF64
-		}
-		if v, ok := dPt.Max.Value(); ok {
-			vF64 := float64(v)
-			ehdp.Max = &vF64
-		}
-		out = append(out, ehdp)
-	}
-	return out
-}
-
-// ExponentialHistogramDataPointBuckets returns an OTLP ExponentialHistogramDataPoint_Buckets generated
-// from bucket.
-func ExponentialHistogramDataPointBuckets(bucket metricdata.ExponentialBucket) *mpb.ExponentialHistogramDataPoint_Buckets {
-	return &mpb.ExponentialHistogramDataPoint_Buckets{
-		Offset:       bucket.Offset,
-		BucketCounts: bucket.Counts,
-	}
-}
-
-// Temporality returns an OTLP AggregationTemporality generated from t. If t
-// is unknown, an error is returned along with the invalid
-// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED.
-func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
-	switch t {
-	case metricdata.DeltaTemporality:
-		return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil
-	case metricdata.CumulativeTemporality:
-		return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil
-	default:
-		err := fmt.Errorf("%w: %s", errUnknownTemporality, t)
-		return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err
-	}
-}
-
-// timeUnixNano returns t as a Unix time, the number of nanoseconds elapsed
-// since January 1, 1970 UTC as uint64.
-// The result is undefined if the Unix time
-// in nanoseconds cannot be represented by an int64
-// (a date before the year 1678 or after 2262).
-// timeUnixNano on the zero Time returns 0.
-// The result does not depend on the location associated with t.
-func timeUnixNano(t time.Time) uint64 {
-	if t.IsZero() {
-		return 0
-	}
-	return uint64(t.UnixNano())
-}
diff --git a/exporters/otlp/otlpmetric/internal/transform/metricdata_test.go b/exporters/otlp/otlpmetric/internal/transform/metricdata_test.go
deleted file mode 100644
index e733e7846e0..00000000000
--- a/exporters/otlp/otlpmetric/internal/transform/metricdata_test.go
+++ /dev/null
@@ -1,610 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
-
-import (
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/sdk/instrumentation"
-	"go.opentelemetry.io/otel/sdk/metric/metricdata"
-	"go.opentelemetry.io/otel/sdk/resource"
-	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
-	cpb "go.opentelemetry.io/proto/otlp/common/v1"
-	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
-	rpb "go.opentelemetry.io/proto/otlp/resource/v1"
-)
-
-type unknownAggT struct {
-	metricdata.Aggregation
-}
-
-var (
-	// Sat Jan 01 2000 00:00:00 GMT+0000.
-	start = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0))
-	end   = start.Add(30 * time.Second)
-
-	alice = attribute.NewSet(attribute.String("user", "alice"))
-	bob   = attribute.NewSet(attribute.String("user", "bob"))
-
-	pbAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
-		Value: &cpb.AnyValue_StringValue{StringValue: "alice"},
-	}}
-	pbBob = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
-		Value: &cpb.AnyValue_StringValue{StringValue: "bob"},
-	}}
-
-	minA, maxA, sumA = 2.0, 4.0, 90.0
-	minB, maxB, sumB = 4.0, 150.0, 234.0
-	otelHDPInt64     = []metricdata.HistogramDataPoint[int64]{{
-		Attributes:   alice,
-		StartTime:    start,
-		Time:         end,
-		Count:        30,
-		Bounds:       []float64{1, 5},
-		BucketCounts: []uint64{0, 30, 0},
-		Min:          metricdata.NewExtrema(int64(minA)),
-		Max:          metricdata.NewExtrema(int64(maxA)),
-		Sum:          int64(sumA),
-	}, {
-		Attributes:   bob,
-		StartTime:    start,
-		Time:         end,
-		Count:        3,
-		Bounds:       []float64{1, 5},
-		BucketCounts: []uint64{0, 1, 2},
-		Min:          metricdata.NewExtrema(int64(minB)),
-		Max:          metricdata.NewExtrema(int64(maxB)),
-		Sum:          int64(sumB),
-	}}
-	otelHDPFloat64 = []metricdata.HistogramDataPoint[float64]{{
-		Attributes:   alice,
-		StartTime:    start,
-		Time:         end,
-		Count:        30,
-		Bounds:       []float64{1, 5},
-		BucketCounts: []uint64{0, 30, 0},
-		Min:          metricdata.NewExtrema(minA),
-		Max:          metricdata.NewExtrema(maxA),
-		Sum:          sumA,
-	}, {
-		Attributes:   bob,
-		StartTime:    start,
-		Time:         end,
-		Count:        3,
-		Bounds:       []float64{1, 5},
-		BucketCounts: []uint64{0, 1, 2},
-		Min:          metricdata.NewExtrema(minB),
-		Max:          metricdata.NewExtrema(maxB),
-		Sum:          sumB,
-	}}
-
-	otelEBucketA = metricdata.ExponentialBucket{
-		Offset: 5,
-		Counts: []uint64{0, 5, 0, 5},
-	}
-	otelEBucketB = metricdata.ExponentialBucket{
-		Offset: 3,
-		Counts: []uint64{0, 5, 0, 5},
-	}
-	otelEBucketsC = metricdata.ExponentialBucket{
-		Offset: 5,
-		Counts: []uint64{0, 1},
-	}
-	otelEBucketsD = metricdata.ExponentialBucket{
-		Offset: 3,
-		Counts: []uint64{0, 1},
-	}
-
-	otelEHDPInt64 = []metricdata.ExponentialHistogramDataPoint[int64]{{
-		Attributes:     alice,
-		StartTime:      start,
-		Time:           end,
-		Count:          30,
-		Scale:          2,
-		ZeroCount:      10,
-		PositiveBucket: otelEBucketA,
-		NegativeBucket: otelEBucketB,
-		ZeroThreshold:  .01,
-		Min:            metricdata.NewExtrema(int64(minA)),
-		Max:            metricdata.NewExtrema(int64(maxA)),
-		Sum:            int64(sumA),
-	}, {
-		Attributes:     bob,
-		StartTime:      start,
-		Time:           end,
-		Count:          3,
-		Scale:          4,
-		ZeroCount:      1,
-		PositiveBucket: otelEBucketsC,
-		NegativeBucket: otelEBucketsD,
-		ZeroThreshold:  .02,
-		Min:            metricdata.NewExtrema(int64(minB)),
-		Max:            metricdata.NewExtrema(int64(maxB)),
-		Sum:            int64(sumB),
-	}}
-	otelEHDPFloat64 = []metricdata.ExponentialHistogramDataPoint[float64]{{
-		Attributes:     alice,
-		StartTime:      start,
-		Time:           end,
-		Count:          30,
-		Scale:          2,
-		ZeroCount:      10,
-		PositiveBucket: otelEBucketA,
-		NegativeBucket: otelEBucketB,
-		ZeroThreshold:  .01,
-		Min:            metricdata.NewExtrema(minA),
-		Max:            metricdata.NewExtrema(maxA),
-		Sum:            sumA,
-	}, {
-		Attributes:     bob,
-		StartTime:      start,
-		Time:           end,
-		Count:          3,
-		Scale:          4,
-		ZeroCount:      1,
-		PositiveBucket: otelEBucketsC,
-		NegativeBucket: otelEBucketsD,
-		ZeroThreshold:  .02,
-		Min:            metricdata.NewExtrema(minB),
-		Max:            metricdata.NewExtrema(maxB),
-		Sum:            sumB,
-	}}
-
-	pbHDP = []*mpb.HistogramDataPoint{{
-		Attributes:        []*cpb.KeyValue{pbAlice},
-		StartTimeUnixNano: uint64(start.UnixNano()),
-		TimeUnixNano:      uint64(end.UnixNano()),
-		Count:             30,
-		Sum:               &sumA,
-		ExplicitBounds:    []float64{1, 5},
-		BucketCounts:      []uint64{0, 30, 0},
-		Min:               &minA,
-		Max:               &maxA,
-	}, {
-		Attributes:        []*cpb.KeyValue{pbBob},
-		StartTimeUnixNano: uint64(start.UnixNano()),
-		TimeUnixNano:      uint64(end.UnixNano()),
-		Count:             3,
-		Sum:               &sumB,
-		ExplicitBounds:    []float64{1, 5},
-		BucketCounts:      []uint64{0, 1, 2},
-		Min:               &minB,
-		Max:               &maxB,
-	}}
-
-	pbEHDPBA = &mpb.ExponentialHistogramDataPoint_Buckets{
-		Offset:       5,
-		BucketCounts: []uint64{0, 5, 0, 5},
-	}
-	pbEHDPBB = &mpb.ExponentialHistogramDataPoint_Buckets{
-		Offset:       3,
-		BucketCounts: []uint64{0, 5, 0, 5},
-	}
-	pbEHDPBC = &mpb.ExponentialHistogramDataPoint_Buckets{
-		Offset:       5,
-		BucketCounts: []uint64{0, 1},
-	}
-	pbEHDPBD = &mpb.ExponentialHistogramDataPoint_Buckets{
-		Offset:       3,
-		BucketCounts: []uint64{0, 1},
-	}
-
-	pbEHDP = []*mpb.ExponentialHistogramDataPoint{{
-		Attributes:        []*cpb.KeyValue{pbAlice},
-		StartTimeUnixNano: uint64(start.UnixNano()),
-		TimeUnixNano:      uint64(end.UnixNano()),
-		Count:             30,
-		Sum:               &sumA,
-		Scale:             2,
-		ZeroCount:         10,
-		Positive:          pbEHDPBA,
-		Negative:          pbEHDPBB,
-		Min:               &minA,
-		Max:               &maxA,
-	}, {
-		Attributes:        []*cpb.KeyValue{pbBob},
-		StartTimeUnixNano: uint64(start.UnixNano()),
-		TimeUnixNano:      uint64(end.UnixNano()),
-		Count:             3,
-		Sum:               &sumB,
-		Scale:             4,
-		ZeroCount:         1,
-		Positive:          pbEHDPBC,
-		Negative:          pbEHDPBD,
-		Min:               &minB,
-		Max:               &maxB,
-	}}
-
-	otelHistInt64 = metricdata.Histogram[int64]{
-		Temporality: metricdata.DeltaTemporality,
-		DataPoints:  otelHDPInt64,
-	}
-	otelHistFloat64 = metricdata.Histogram[float64]{
-		Temporality: metricdata.DeltaTemporality,
-		DataPoints:  otelHDPFloat64,
-	}
-	invalidTemporality metricdata.Temporality
-	otelHistInvalid    = metricdata.Histogram[int64]{
-		Temporality: invalidTemporality,
-		DataPoints:  otelHDPInt64,
-	}
-
-	otelExpoHistInt64 = metricdata.ExponentialHistogram[int64]{
-		Temporality: metricdata.DeltaTemporality,
-		DataPoints:  otelEHDPInt64,
-	}
-	otelExpoHistFloat64 = metricdata.ExponentialHistogram[float64]{
-		Temporality: metricdata.DeltaTemporality,
-		DataPoints:  otelEHDPFloat64,
-	}
-	otelExpoHistInvalid = metricdata.ExponentialHistogram[int64]{
-		Temporality: invalidTemporality,
-		DataPoints:  otelEHDPInt64,
-	}
-
-	pbHist = &mpb.Histogram{
-		AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
-		DataPoints:             pbHDP,
-	}
-
-	pbExpoHist = &mpb.ExponentialHistogram{
-		AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
-		DataPoints:             pbEHDP,
-	}
-
-	otelDPtsInt64 = []metricdata.DataPoint[int64]{
-		{Attributes: alice, StartTime: start, Time: end, Value: 1},
-		{Attributes: bob, StartTime: start, Time: end, Value: 2},
-	}
-	otelDPtsFloat64 = []metricdata.DataPoint[float64]{
-		{Attributes: alice, StartTime: start, Time: end, Value: 1.0},
-		{Attributes: bob, StartTime: start, Time: end, Value: 2.0},
-	}
-
-	pbDPtsInt64 = []*mpb.NumberDataPoint{
-		{
-			Attributes:        []*cpb.KeyValue{pbAlice},
-			StartTimeUnixNano: uint64(start.UnixNano()),
-			TimeUnixNano:      uint64(end.UnixNano()),
-			Value:             &mpb.NumberDataPoint_AsInt{AsInt: 1},
-		},
-		{
-			Attributes:        []*cpb.KeyValue{pbBob},
-			StartTimeUnixNano: uint64(start.UnixNano()),
-			TimeUnixNano:      uint64(end.UnixNano()),
-			Value:             &mpb.NumberDataPoint_AsInt{AsInt: 2},
-		},
-	}
-	pbDPtsFloat64 = []*mpb.NumberDataPoint{
-		{
-			Attributes:        []*cpb.KeyValue{pbAlice},
-			StartTimeUnixNano: uint64(start.UnixNano()),
-			TimeUnixNano:      uint64(end.UnixNano()),
-			Value:             &mpb.NumberDataPoint_AsDouble{AsDouble: 1.0},
-		},
-		{
-			Attributes:        []*cpb.KeyValue{pbBob},
-			StartTimeUnixNano: uint64(start.UnixNano()),
-			TimeUnixNano:      uint64(end.UnixNano()),
-			Value:             &mpb.NumberDataPoint_AsDouble{AsDouble: 2.0},
-		},
-	}
-
-	otelSumInt64 = metricdata.Sum[int64]{
-		Temporality: metricdata.CumulativeTemporality,
-		IsMonotonic: true,
-		DataPoints:  otelDPtsInt64,
-	}
-	otelSumFloat64 = metricdata.Sum[float64]{
-		Temporality: metricdata.DeltaTemporality,
-		IsMonotonic: false,
-		DataPoints:  otelDPtsFloat64,
-	}
-	otelSumInvalid = metricdata.Sum[float64]{
-		Temporality: invalidTemporality,
-		IsMonotonic: false,
-		DataPoints:  otelDPtsFloat64,
-	}
-
-	pbSumInt64 = &mpb.Sum{
-		AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
-		IsMonotonic:            true,
-		DataPoints:             pbDPtsInt64,
-	}
-	pbSumFloat64 = &mpb.Sum{
-		AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA,
-		IsMonotonic:            false,
-		DataPoints:             pbDPtsFloat64,
-	}
-
-	otelGaugeInt64         = metricdata.Gauge[int64]{DataPoints: otelDPtsInt64}
-	otelGaugeFloat64       = metricdata.Gauge[float64]{DataPoints: otelDPtsFloat64}
-	otelGaugeZeroStartTime = metricdata.Gauge[int64]{DataPoints: []metricdata.DataPoint[int64]{{Attributes: alice, StartTime: time.Time{}, Time: end, Value: 1}}}
-
-	pbGaugeInt64         = &mpb.Gauge{DataPoints: pbDPtsInt64}
-	pbGaugeFloat64       = &mpb.Gauge{DataPoints: pbDPtsFloat64}
-	pbGaugeZeroStartTime = &mpb.Gauge{DataPoints: []*mpb.NumberDataPoint{
-		{
-			Attributes:        []*cpb.KeyValue{pbAlice},
-			StartTimeUnixNano: 0,
-			TimeUnixNano:      uint64(end.UnixNano()),
-			Value:             &mpb.NumberDataPoint_AsInt{AsInt: 1},
-		},
-	}}
-
-	unknownAgg  unknownAggT
-	otelMetrics = []metricdata.Metrics{
-		{
-			Name:        "int64-gauge",
-			Description: "Gauge with int64 values",
-			Unit:        "1",
-			Data:        otelGaugeInt64,
-		},
-		{
-			Name:        "float64-gauge",
-			Description: "Gauge with float64 values",
-			Unit:        "1",
-			Data:        otelGaugeFloat64,
-		},
-		{
-			Name:        "int64-sum",
-			Description: "Sum with int64 values",
-			Unit:        "1",
-			Data:        otelSumInt64,
-		},
-		{
-			Name:        "float64-sum",
-			Description: "Sum with float64 values",
-			Unit:        "1",
-			Data:        otelSumFloat64,
-		},
-		{
-			Name:        "invalid-sum",
-			Description: "Sum with invalid temporality",
-			Unit:        "1",
-			Data:        otelSumInvalid,
-		},
-		{
-			Name:        "int64-histogram",
-			Description: "Histogram",
-			Unit:        "1",
-			Data:        otelHistInt64,
-		},
-		{
-			Name:        "float64-histogram",
-			Description: "Histogram",
-			Unit:        "1",
-			Data:        otelHistFloat64,
-		},
-		{
-			Name:        "invalid-histogram",
-			Description: "Invalid histogram",
-			Unit:        "1",
-			Data:        otelHistInvalid,
-		},
-		{
-			Name:        "unknown",
-			Description: "Unknown aggregation",
-			Unit:        "1",
-			Data:        unknownAgg,
-		},
-		{
-			Name:        "int64-ExponentialHistogram",
-			Description: "Exponential Histogram",
-			Unit:        "1",
-			Data:        otelExpoHistInt64,
-		},
-		{
-			Name:        "float64-ExponentialHistogram",
-			Description: "Exponential Histogram",
-			Unit:        "1",
-			Data:        otelExpoHistFloat64,
-		},
-		{
-			Name:        "invalid-ExponentialHistogram",
-			Description: "Invalid Exponential Histogram",
-			Unit:        "1",
-			Data:        otelExpoHistInvalid,
-		},
-		{
-			Name:        "zero-time",
-			Description: "Gauge with 0 StartTime",
-			Unit:        "1",
-			Data:        otelGaugeZeroStartTime,
-		},
-	}
-
-	pbMetrics = []*mpb.Metric{
-		{
-			Name:        "int64-gauge",
-			Description: "Gauge with int64 values",
-			Unit:        "1",
-			Data:        &mpb.Metric_Gauge{Gauge: pbGaugeInt64},
-		},
-		{
-			Name:        "float64-gauge",
-			Description: "Gauge with float64 values",
-			Unit:        "1",
-			Data:        &mpb.Metric_Gauge{Gauge: pbGaugeFloat64},
-		},
-		{
-			Name:        "int64-sum",
-			Description: "Sum with int64 values",
-			Unit:        "1",
-			Data:        &mpb.Metric_Sum{Sum: pbSumInt64},
-		},
-		{
-			Name:        "float64-sum",
-			Description: "Sum with float64 values",
-			Unit:        "1",
-			Data:        &mpb.Metric_Sum{Sum: pbSumFloat64},
-		},
-		{
-			Name:        "int64-histogram",
-			Description: "Histogram",
-			Unit:        "1",
-			Data:        &mpb.Metric_Histogram{Histogram: pbHist},
-		},
-		{
-			Name:        "float64-histogram",
-			Description: "Histogram",
-			Unit:        "1",
-			Data:        &mpb.Metric_Histogram{Histogram: pbHist},
-		},
-		{
-			Name:        "int64-ExponentialHistogram",
-			Description: "Exponential Histogram",
-			Unit:        "1",
-			Data:        &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist},
-		},
-		{
-			Name:        "float64-ExponentialHistogram",
-			Description: "Exponential Histogram",
-			Unit:        "1",
-			Data:        &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist},
-		},
-		{
-			Name:        "zero-time",
-			Description: "Gauge with 0 StartTime",
-			Unit:        "1",
-			Data:        &mpb.Metric_Gauge{Gauge: pbGaugeZeroStartTime},
-		},
-	}
-
-	otelScopeMetrics = []metricdata.ScopeMetrics{{
-		Scope: instrumentation.Scope{
-			Name:      "test/code/path",
-			Version:   "v0.1.0",
-			SchemaURL: semconv.SchemaURL,
-		},
-		Metrics: otelMetrics,
-	}}
-
-	pbScopeMetrics = []*mpb.ScopeMetrics{{
-		Scope: &cpb.InstrumentationScope{
-			Name:    "test/code/path",
-			Version: "v0.1.0",
-		},
-		Metrics:   pbMetrics,
-		SchemaUrl: semconv.SchemaURL,
-	}}
-
-	otelRes = resource.NewWithAttributes(
-		semconv.SchemaURL,
-		semconv.ServiceName("test server"),
-		semconv.ServiceVersion("v0.1.0"),
-	)
-
-	pbRes = &rpb.Resource{
-		Attributes: []*cpb.KeyValue{
-			{
-				Key: "service.name",
-				Value: &cpb.AnyValue{
-					Value: &cpb.AnyValue_StringValue{StringValue: "test server"},
-				},
-			},
-			{
-				Key: "service.version",
-				Value: &cpb.AnyValue{
-					Value: &cpb.AnyValue_StringValue{StringValue: "v0.1.0"},
-				},
-			},
-		},
-	}
-
-	otelResourceMetrics = &metricdata.ResourceMetrics{
-		Resource:     otelRes,
-		ScopeMetrics: otelScopeMetrics,
-	}
-
-	pbResourceMetrics = &mpb.ResourceMetrics{
-		Resource:     pbRes,
-		ScopeMetrics: pbScopeMetrics,
-		SchemaUrl:    semconv.SchemaURL,
-	}
-)
-
-func TestTransformations(t *testing.T) {
-	// Run tests from the "bottom-up" of the metricdata data-types and halt
-	// when a failure occurs to ensure the clearest failure message (as
-	// opposed to the opposite of testing from the top-down which will obscure
-	// errors deep inside the structs).
-
-	// DataPoint types.
-	assert.Equal(t, pbHDP, HistogramDataPoints(otelHDPInt64))
-	assert.Equal(t, pbHDP, HistogramDataPoints(otelHDPFloat64))
-	assert.Equal(t, pbDPtsInt64, DataPoints[int64](otelDPtsInt64))
-	require.Equal(t, pbDPtsFloat64, DataPoints[float64](otelDPtsFloat64))
-	assert.Equal(t, pbEHDP, ExponentialHistogramDataPoints(otelEHDPInt64))
-	assert.Equal(t, pbEHDP, ExponentialHistogramDataPoints(otelEHDPFloat64))
-	assert.Equal(t, pbEHDPBA, ExponentialHistogramDataPointBuckets(otelEBucketA))
-
-	// Aggregations.
-	h, err := Histogram(otelHistInt64)
-	assert.NoError(t, err)
-	assert.Equal(t, &mpb.Metric_Histogram{Histogram: pbHist}, h)
-	h, err = Histogram(otelHistFloat64)
-	assert.NoError(t, err)
-	assert.Equal(t, &mpb.Metric_Histogram{Histogram: pbHist}, h)
-	h, err = Histogram(otelHistInvalid)
-	assert.ErrorIs(t, err, errUnknownTemporality)
-	assert.Nil(t, h)
-
-	s, err := Sum[int64](otelSumInt64)
-	assert.NoError(t, err)
-	assert.Equal(t, &mpb.Metric_Sum{Sum: pbSumInt64}, s)
-	s, err = Sum[float64](otelSumFloat64)
-	assert.NoError(t, err)
-	assert.Equal(t, &mpb.Metric_Sum{Sum: pbSumFloat64}, s)
-	s, err = Sum[float64](otelSumInvalid)
-	assert.ErrorIs(t, err, errUnknownTemporality)
-	assert.Nil(t, s)
-
-	assert.Equal(t, &mpb.Metric_Gauge{Gauge: pbGaugeInt64}, Gauge[int64](otelGaugeInt64))
-	require.Equal(t, &mpb.Metric_Gauge{Gauge: pbGaugeFloat64}, Gauge[float64](otelGaugeFloat64))
-
-	e, err := ExponentialHistogram(otelExpoHistInt64)
-	assert.NoError(t, err)
-	assert.Equal(t, &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, e)
-	e, err = ExponentialHistogram(otelExpoHistFloat64)
-	assert.NoError(t, err)
-	assert.Equal(t, &mpb.Metric_ExponentialHistogram{ExponentialHistogram: pbExpoHist}, e)
-	e, err = ExponentialHistogram(otelExpoHistInvalid)
-	assert.ErrorIs(t, err, errUnknownTemporality)
-	assert.Nil(t, e)
-
-	// Metrics.
-	m, err := Metrics(otelMetrics)
-	assert.ErrorIs(t, err, errUnknownTemporality)
-	assert.ErrorIs(t, err, errUnknownAggregation)
-	require.Equal(t, pbMetrics, m)
-
-	// Scope Metrics.
-	sm, err := ScopeMetrics(otelScopeMetrics)
-	assert.ErrorIs(t, err, errUnknownTemporality)
-	assert.ErrorIs(t, err, errUnknownAggregation)
-	require.Equal(t, pbScopeMetrics, sm)
-
-	// Resource Metrics.
-	rm, err := ResourceMetrics(otelResourceMetrics)
-	assert.ErrorIs(t, err, errUnknownTemporality)
-	assert.ErrorIs(t, err, errUnknownAggregation)
-	require.Equal(t, pbResourceMetrics, rm)
-}
diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
index ff0647deec3..16f9af12b66 100644
--- a/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
@@ -61,7 +61,11 @@ func newClient(ctx context.Context, cfg oconf.Config) (*client, error) {
 	if c.conn == nil {
 		// If the caller did not provide a ClientConn when the client was
 		// created, create one using the configuration they did provide.
-		conn, err := grpc.DialContext(ctx, cfg.Metrics.Endpoint, cfg.DialOptions...)
+		userAgent := "OTel Go OTLP over gRPC metrics exporter/" + Version()
+		dialOpts := []grpc.DialOption{grpc.WithUserAgent(userAgent)}
+		dialOpts = append(dialOpts, cfg.DialOptions...)
+
+		conn, err := grpc.DialContext(ctx, cfg.Metrics.Endpoint, dialOpts...)
 		if err != nil {
 			return nil, err
 		}
@@ -172,28 +176,36 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
 // duration to wait for if an explicit throttle time is included in err.
 func retryable(err error) (bool, time.Duration) {
 	s := status.Convert(err)
+	return retryableGRPCStatus(s)
+}
+
+func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
 	switch s.Code() {
 	case codes.Canceled,
 		codes.DeadlineExceeded,
-		codes.ResourceExhausted,
 		codes.Aborted,
 		codes.OutOfRange,
 		codes.Unavailable,
 		codes.DataLoss:
-		return true, throttleDelay(s)
+		// Additionally, handle RetryInfo.
+		_, d := throttleDelay(s)
+		return true, d
+	case codes.ResourceExhausted:
+		// Retry only if the server signals that the recovery from resource exhaustion is possible.
+		return throttleDelay(s)
 	}
 
 	// Not a retry-able error.
 	return false, 0
 }
 
-// throttleDelay returns a duration to wait for if an explicit throttle time
-// is included in the response status.
-func throttleDelay(s *status.Status) time.Duration {
+// throttleDelay returns if the status is RetryInfo
+// and the duration to wait for if an explicit throttle time is included.
+func throttleDelay(s *status.Status) (bool, time.Duration) {
 	for _, detail := range s.Details() {
 		if t, ok := detail.(*errdetails.RetryInfo); ok {
-			return t.RetryDelay.AsDuration()
+			return true, t.RetryDelay.AsDuration()
 		}
 	}
-	return 0
+	return false, 0
 }
diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/client_test.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/client_test.go
index 1de64a77268..03f07d69991 100644
--- a/exporters/otlp/otlpmetric/otlpmetricgrpc/client_test.go
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/client_test.go
@@ -33,15 +33,17 @@ import (
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
 )
 
-func TestThrottleDuration(t *testing.T) {
+func TestThrottleDelay(t *testing.T) {
 	c := codes.ResourceExhausted
 	testcases := []struct {
-		status   *status.Status
-		expected time.Duration
+		status       *status.Status
+		wantOK       bool
+		wantDuration time.Duration
 	}{
 		{
-			status:   status.New(c, "NoRetryInfo"),
-			expected: 0,
+			status:       status.New(c, "NoRetryInfo"),
+			wantOK:       false,
+			wantDuration: 0,
 		},
 		{
 			status: func() *status.Status {
@@ -53,7 +55,8 @@ func TestThrottleDuration(t *testing.T) {
 				require.NoError(t, err)
 				return s
 			}(),
-			expected: 15 * time.Millisecond,
+			wantOK:       true,
+			wantDuration: 15 * time.Millisecond,
 		},
 		{
 			status: func() *status.Status {
@@ -63,7 +66,8 @@ func TestThrottleDuration(t *testing.T) {
 				require.NoError(t, err)
 				return s
 			}(),
-			expected: 0,
+			wantOK:       false,
+			wantDuration: 0,
 		},
 		{
 			status: func() *status.Status {
@@ -76,7 +80,8 @@ func TestThrottleDuration(t *testing.T) {
 				require.NoError(t, err)
 				return s
 			}(),
-			expected: 13 * time.Minute,
+			wantOK:       true,
+			wantDuration: 13 * time.Minute,
 		},
 		{
 			status: func() *status.Status {
@@ -91,13 +96,16 @@ func TestThrottleDuration(t *testing.T) {
 				require.NoError(t, err)
 				return s
 			}(),
-			expected: 13 * time.Minute,
+			wantOK:       true,
+			wantDuration: 13 * time.Minute,
 		},
 	}
 
 	for _, tc := range testcases {
 		t.Run(tc.status.Message(), func(t *testing.T) {
-			require.Equal(t, tc.expected, throttleDelay(tc.status))
+			ok, d := throttleDelay(tc.status)
+			assert.Equal(t, tc.wantOK, ok)
+			assert.Equal(t, tc.wantDuration, d)
 		})
 	}
 }
@@ -112,7 +120,7 @@ func TestRetryable(t *testing.T) {
 		codes.NotFound:           false,
 		codes.AlreadyExists:      false,
 		codes.PermissionDenied:   false,
-		codes.ResourceExhausted:  true,
+		codes.ResourceExhausted:  false,
 		codes.FailedPrecondition: false,
 		codes.Aborted:            true,
 		codes.OutOfRange:         true,
@@ -129,6 +137,20 @@ func TestRetryable(t *testing.T) {
 	}
 }
 
+func TestRetryableGRPCStatusResourceExhaustedWithRetryInfo(t *testing.T) {
+	delay := 15 * time.Millisecond
+	s, err := status.New(codes.ResourceExhausted, "WithRetryInfo").WithDetails(
+		&errdetails.RetryInfo{
+			RetryDelay: durationpb.New(delay),
+		},
+	)
+	require.NoError(t, err)
+
+	ok, d := retryableGRPCStatus(s)
+	assert.True(t, ok)
+	assert.Equal(t, delay, d)
+}
+
 type clientShim struct {
 	*client
 }
@@ -136,9 +158,11 @@ type clientShim struct {
 func (clientShim) Temporality(metric.InstrumentKind) metricdata.Temporality {
 	return metricdata.CumulativeTemporality
 }
+
 func (clientShim) Aggregation(metric.InstrumentKind) metric.Aggregation {
 	return nil
 }
+
 func (clientShim) ForceFlush(ctx context.Context) error {
 	return ctx.Err()
 }
@@ -186,7 +210,7 @@ func TestConfig(t *testing.T) {
 		require.NoError(t, exp.Shutdown(ctx))
 
 		got := coll.Headers()
-		require.Regexp(t, "OTel OTLP Exporter Go/[01]\\..*", got)
+		require.Regexp(t, "OTel Go OTLP over gRPC metrics exporter/[01]\\..*", got)
 		require.Contains(t, got, key)
 		assert.Equal(t, got[key], []string{headers[key]})
 	})
diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
index 6ba3600b1c1..fbd495e7d7d 100644
--- a/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go
@@ -109,13 +109,7 @@ func compressorToCompression(compressor string) oconf.Compression {
 }
 
 // WithCompressor sets the compressor the gRPC client uses.
-//
-// It is the responsibility of the caller to ensure that the compressor set
-// has been registered with google.golang.org/grpc/encoding (see
-// encoding.RegisterCompressor for more information). For example, to register
-// the gzip compressor import the package:
-//
-//	import _ "google.golang.org/grpc/encoding/gzip"
+// Supported compressor values: "gzip".
 //
 // If the OTEL_EXPORTER_OTLP_COMPRESSION or
 // OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go
index 7820619bf60..7be572a79d0 100644
--- a/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go
@@ -12,6 +12,85 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package otlpmetricgrpc provides an otlpmetric.Exporter that communicates
-// with an OTLP receiving endpoint using gRPC.
+/*
+Package otlpmetricgrpc provides an OTLP metrics exporter using gRPC.
+By default the telemetry is sent to https://localhost:4317.
+
+Exporter should be created using [New] and used with a [metric.PeriodicReader].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4317") -
+target to which the exporter sends telemetry.
+The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
+The value must contain a host.
+The value may additionally contain a port, a scheme, and a path.
+The value accepts "http" and "https" scheme.
+The value should not contain a query string or fragment.
+OTEL_EXPORTER_OTLP_METRICS_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
+The configuration can be overridden by [WithEndpoint], [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_METRICS_INSECURE (default: "false") -
+setting "true" disables client transport security for the exporter's gRPC connection.
+You can use this only when an endpoint is provided without the http or https scheme.
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT setting overrides
+the scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT.
+OTEL_EXPORTER_OTLP_METRICS_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
+The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) -
+key-value pairs used as gRPC metadata associated with gRPC requests.
+The value is expected to be represented in a format matching to the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_METRICS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_METRICS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/chain trust for the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option.
+
+OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE (default: "cumulative") -
+aggregation temporality to use on the basis of instrument kind. Supported values:
+  - "cumulative" - Cumulative aggregation temporality for all instrument kinds,
+  - "delta" - Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds;
+    Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds,
+  - "lowmemory" - Delta aggregation temporality for Synchronous Counter and Histogram instrument kinds;
+    Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds.
+
+The configuration can be overridden by [WithTemporalitySelector] option.
+
+OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION (default: "explicit_bucket_histogram") -
+default aggregation to use for histogram instruments. Supported values:
+  - "explicit_bucket_histogram" - [Explicit Bucket Histogram Aggregation],
+  - "base2_exponential_bucket_histogram" - [Base2 Exponential Bucket Histogram Aggregation].
+
+The configuration can be overridden by [WithAggregationSelector] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+[Explicit Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation
+[Base2 Exponential Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation
+*/
 package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod
index 4b865521994..5dd47288af3 100644
--- a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod
@@ -1,36 +1,37 @@
 module go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
 
-go 1.19
+go 1.20
 
 retract v0.32.2 // Contains unresolvable dependencies.
 
 require (
 	github.com/cenkalti/backoff/v4 v4.2.1
-	github.com/google/go-cmp v0.5.9
+	github.com/google/go-cmp v0.6.0
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/sdk/metric v0.39.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/sdk/metric v1.21.0
 	go.opentelemetry.io/proto/otlp v1.0.0
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc
-	google.golang.org/grpc v1.57.0
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d
+	google.golang.org/grpc v1.59.0
 	google.golang.org/protobuf v1.31.0
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
+	github.com/kr/text v0.2.0 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	go.opentelemetry.io/otel/trace v1.16.0 // indirect
-	golang.org/x/net v0.10.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
-	golang.org/x/text v0.9.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect
+	github.com/rogpeppe/go-internal v1.11.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	go.opentelemetry.io/otel/trace v1.21.0 // indirect
+	golang.org/x/net v0.17.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
+	golang.org/x/text v0.13.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
@@ -40,10 +41,6 @@ replace go.opentelemetry.io/otel/sdk => ../../../../sdk
 
 replace go.opentelemetry.io/otel/sdk/metric => ../../../../sdk/metric
 
-replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../
-
 replace go.opentelemetry.io/otel/metric => ../../../../metric
 
 replace go.opentelemetry.io/otel/trace => ../../../../trace
-
-replace go.opentelemetry.io/otel/exporters/otlp/internal/retry => ../../internal/retry
diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum
index 9397989cf3d..b3410b5f2ff 100644
--- a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum
@@ -1,44 +1,47 @@
 github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
 github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
+github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
 go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
-golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e h1:Ao9GzfUMPH3zjVfzXG5rlWlk+Q8MXWKwWpwVQE1MXfw=
-google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM=
-google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
-google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
-google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
+google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
+google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
index 1d571294695..17951ceb451 100644
--- a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig.go
@@ -174,13 +174,13 @@ func stringToHeader(value string) map[string]string {
 			global.Error(errors.New("missing '="), "parse headers", "input", header)
 			continue
 		}
-		name, err := url.QueryUnescape(n)
+		name, err := url.PathUnescape(n)
 		if err != nil {
 			global.Error(err, "escape header key", "key", n)
 			continue
 		}
 		trimmedName := strings.TrimSpace(name)
-		value, err := url.QueryUnescape(v)
+		value, err := url.PathUnescape(v)
 		if err != nil {
 			global.Error(err, "escape header value", "value", v)
 			continue
diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig_test.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig_test.go
index cec506208d5..6cbe0c7ab11 100644
--- a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig_test.go
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/envconfig/envconfig_test.go
@@ -427,7 +427,12 @@ func TestStringToHeader(t *testing.T) {
 			want:  map[string]string{"userId": "alice"},
 		},
 		{
-			name:  "multiples headers encoded",
+			name:  "simple header conforms to RFC 3986 spec",
+			value: " userId = alice+test ",
+			want:  map[string]string{"userId": "alice+test"},
+		},
+		{
+			name:  "multiple headers encoded",
 			value: "userId=alice,serverNode=DF%3A28,isProduction=false",
 			want: map[string]string{
 				"userId":       "alice",
@@ -435,6 +440,16 @@ func TestStringToHeader(t *testing.T) {
 				"isProduction": "false",
 			},
 		},
+		{
+			name:  "multiple headers encoded per RFC 3986 spec",
+			value: "userId=alice+test,serverNode=DF%3A28,isProduction=false,namespace=localhost/test",
+			want: map[string]string{
+				"userId":       "alice+test",
+				"serverNode":   "DF:28",
+				"isProduction": "false",
+				"namespace":    "localhost/test",
+			},
+		},
 		{
 			name:  "invalid headers format",
 			value: "userId:alice",
diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
index 40a4469f77a..a85f2712224 100644
--- a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options.go
@@ -30,7 +30,6 @@ import (
 	"google.golang.org/grpc/credentials/insecure"
 	"google.golang.org/grpc/encoding/gzip"
 
-	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
 	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/retry"
 	"go.opentelemetry.io/otel/sdk/metric"
 )
@@ -122,7 +121,6 @@ func cleanPath(urlPath string, defaultPath string) string {
 // NewGRPCConfig returns a new Config with all settings applied from opts and
 // any unset setting using the default gRPC config values.
 func NewGRPCConfig(opts ...GRPCOption) Config {
-	userAgent := "OTel OTLP Exporter Go/" + otlpmetric.Version()
 	cfg := Config{
 		Metrics: SignalConfig{
 			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
@@ -134,7 +132,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
 			AggregationSelector: metric.DefaultAggregationSelector,
 		},
 		RetryConfig: retry.DefaultConfig,
-		DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
 	}
 	cfg = ApplyGRPCEnvConfigs(cfg)
 	for _, opt := range opts {
@@ -158,9 +155,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
 	if cfg.Metrics.Compression == GzipCompression {
 		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
 	}
-	if len(cfg.DialOptions) != 0 {
-		cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
-	}
 	if cfg.ReconnectionPeriod != 0 {
 		p := grpc.ConnectParams{
 			Backoff:           backoff.DefaultConfig,
diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options_test.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options_test.go
index 8687acabcfa..64457efbf08 100644
--- a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options_test.go
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/oconf/options_test.go
@@ -203,7 +203,7 @@ func TestConfigs(t *testing.T) {
 			},
 			asserts: func(t *testing.T, c *Config, grpcOption bool) {
 				if grpcOption {
-					//TODO: make sure gRPC's credentials actually works
+					// TODO: make sure gRPC's credentials actually works
 					assert.NotNil(t, c.Metrics.GRPCCredentials)
 				} else {
 					// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/client.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/client.go
index 3bb415e441f..154d4dd3c8c 100644
--- a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/client.go
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/client.go
@@ -38,7 +38,7 @@ import (
 
 var (
 	// Sat Jan 01 2000 00:00:00 GMT+0000.
-	start = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0))
+	start = time.Date(2000, time.January, 0o1, 0, 0, 0, 0, time.FixedZone("GMT", 0))
 	end   = start.Add(30 * time.Second)
 
 	kvAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/collector.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/collector.go
index f5eb0a4af9c..f08fbd5c5f7 100644
--- a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/collector.go
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/otest/collector.go
@@ -195,6 +195,8 @@ func (e *HTTPResponseError) Unwrap() error { return e.Err }
 
 // HTTPCollector is an OTLP HTTP server that collects all requests it receives.
 type HTTPCollector struct {
+	plainTextResponse bool
+
 	headersMu sync.Mutex
 	headers   http.Header
 	storage   *Storage
@@ -217,7 +219,7 @@ type HTTPCollector struct {
 // If errCh is not nil, the collector will respond to HTTP requests with errors
 // sent on that channel. This means that if errCh is not nil Export calls will
 // block until an error is received.
-func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult) (*HTTPCollector, error) {
+func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult, opts ...func(*HTTPCollector)) (*HTTPCollector, error) {
 	u, err := url.Parse(endpoint)
 	if err != nil {
 		return nil, err
@@ -234,6 +236,9 @@ func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult) (*HTTPColle
 		storage:  NewStorage(),
 		resultCh: resultCh,
 	}
+	for _, opt := range opts {
+		opt(c)
+	}
 
 	c.listener, err = net.Listen("tcp", u.Host)
 	if err != nil {
@@ -242,7 +247,11 @@ func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult) (*HTTPColle
 
 	mux := http.NewServeMux()
 	mux.Handle(u.Path, http.HandlerFunc(c.handler))
-	c.srv = &http.Server{Handler: mux}
+	c.srv = &http.Server{
+		Handler:      mux,
+		ReadTimeout:  10 * time.Second,
+		WriteTimeout: 10 * time.Second,
+	}
 	if u.Scheme == "https" {
 		cert, err := weakCertificate()
 		if err != nil {
@@ -258,6 +267,14 @@ func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult) (*HTTPColle
 	return c, nil
 }
 
+// WithHTTPCollectorRespondingPlainText makes the HTTPCollector return
+// a plaintext, instead of protobuf, response.
+func WithHTTPCollectorRespondingPlainText() func(*HTTPCollector) {
+	return func(s *HTTPCollector) {
+		s.plainTextResponse = true
+	}
+}
+
 // Shutdown shuts down the HTTP server closing all open connections and
 // listeners.
 func (c *HTTPCollector) Shutdown(ctx context.Context) error {
@@ -378,6 +395,13 @@ func (c *HTTPCollector) respond(w http.ResponseWriter, resp ExportResult) {
 		return
 	}
 
+	if c.plainTextResponse {
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		w.WriteHeader(http.StatusOK)
+		_, _ = w.Write([]byte("OK"))
+		return
+	}
+
 	w.Header().Set("Content-Type", "application/x-protobuf")
 	w.WriteHeader(http.StatusOK)
 	if resp.Response == nil {
diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata_test.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata_test.go
index b94c48dae8d..676e5785633 100644
--- a/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata_test.go
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/internal/transform/metricdata_test.go
@@ -40,7 +40,7 @@ type unknownAggT struct {
 
 var (
 	// Sat Jan 01 2000 00:00:00 GMT+0000.
-	start = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0))
+	start = time.Date(2000, time.January, 0o1, 0, 0, 0, 0, time.FixedZone("GMT", 0))
 	end   = start.Add(30 * time.Second)
 
 	alice = attribute.NewSet(attribute.String("user", "alice"))
diff --git a/exporters/otlp/otlpmetric/version.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
similarity index 81%
rename from exporters/otlp/otlpmetric/version.go
rename to exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
index caec1c40886..983968c6a3b 100644
--- a/exporters/otlp/otlpmetric/version.go
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/version.go
@@ -12,9 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
+package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
 
-// Version is the current release version of the OpenTelemetry OTLP metrics exporter in use.
+// Version is the current release version of the OpenTelemetry OTLP over gRPC metrics exporter in use.
 func Version() string {
-	return "0.39.0"
+	return "0.44.0"
 }
diff --git a/exporters/otlp/otlpmetric/version_test.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/version_test.go
similarity index 90%
rename from exporters/otlp/otlpmetric/version_test.go
rename to exporters/otlp/otlpmetric/otlpmetricgrpc/version_test.go
index e0f3a3ef3b5..9b7f0544ad5 100644
--- a/exporters/otlp/otlpmetric/version_test.go
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/version_test.go
@@ -12,15 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package otlpmetric_test
+package otlpmetricgrpc
 
 import (
 	"regexp"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
-
-	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
 )
 
 // regex taken from https://github.com/Masterminds/semver/tree/v3.1.1
@@ -29,6 +27,6 @@ var versionRegex = regexp.MustCompile(`^v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
 	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$`)
 
 func TestVersionSemver(t *testing.T) {
-	v := otlpmetric.Version()
+	v := Version()
 	assert.NotNil(t, versionRegex.FindStringSubmatch(v), "version is not semver: %s", v)
 }
diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/client.go b/exporters/otlp/otlpmetric/otlpmetrichttp/client.go
index 33f5474c1aa..73463c91d5f 100644
--- a/exporters/otlp/otlpmetric/otlpmetrichttp/client.go
+++ b/exporters/otlp/otlpmetric/otlpmetrichttp/client.go
@@ -18,6 +18,7 @@ import (
 	"bytes"
 	"compress/gzip"
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"net"
@@ -30,7 +31,6 @@ import (
 	"google.golang.org/protobuf/proto"
 
 	"go.opentelemetry.io/otel"
-	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
 	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal"
 	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf"
 	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
@@ -89,7 +89,7 @@ func newClient(cfg oconf.Config) (*client, error) {
 		return nil, err
 	}
 
-	userAgent := "OTel OTLP Exporter Go/" + otlpmetric.Version()
+	userAgent := "OTel Go OTLP over HTTP/protobuf metrics exporter/" + Version()
 	req.Header.Set("User-Agent", userAgent)
 
 	if n := len(cfg.Metrics.Headers); n > 0 {
@@ -148,6 +148,10 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou
 
 		request.reset(iCtx)
 		resp, err := c.httpClient.Do(request.Request)
+		var urlErr *url.Error
+		if errors.As(err, &urlErr) && urlErr.Temporary() {
+			return newResponseError(http.Header{})
+		}
 		if err != nil {
 			return err
 		}
@@ -162,8 +166,11 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou
 			if _, err := io.Copy(&respData, resp.Body); err != nil {
 				return err
 			}
+			if respData.Len() == 0 {
+				return nil
+			}
 
-			if respData.Len() != 0 {
+			if resp.Header.Get("Content-Type") == "application/x-protobuf" {
 				var respProto colmetricpb.ExportMetricsServiceResponse
 				if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil {
 					return err
@@ -179,7 +186,10 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou
 				}
 			}
 			return nil
-		case sc == http.StatusTooManyRequests, sc == http.StatusServiceUnavailable:
+		case sc == http.StatusTooManyRequests,
+			sc == http.StatusBadGateway,
+			sc == http.StatusServiceUnavailable,
+			sc == http.StatusGatewayTimeout:
 			// Retry-able failure.
 			rErr = newResponseError(resp.Header)
 
diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/client_test.go b/exporters/otlp/otlpmetric/otlpmetrichttp/client_test.go
index 36075c19ee5..a4ead01c1f1 100644
--- a/exporters/otlp/otlpmetric/otlpmetrichttp/client_test.go
+++ b/exporters/otlp/otlpmetric/otlpmetrichttp/client_test.go
@@ -31,6 +31,7 @@ import (
 	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest"
 	"go.opentelemetry.io/otel/sdk/metric"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
 )
 
 type clientShim struct {
@@ -40,9 +41,11 @@ type clientShim struct {
 func (clientShim) Temporality(metric.InstrumentKind) metricdata.Temporality {
 	return metricdata.CumulativeTemporality
 }
+
 func (clientShim) Aggregation(metric.InstrumentKind) metric.Aggregation {
 	return nil
 }
+
 func (clientShim) ForceFlush(ctx context.Context) error {
 	return ctx.Err()
 }
@@ -63,6 +66,30 @@ func TestClient(t *testing.T) {
 	t.Run("Integration", otest.RunClientTests(factory))
 }
 
+func TestClientWithHTTPCollectorRespondingPlainText(t *testing.T) {
+	ctx := context.Background()
+	coll, err := otest.NewHTTPCollector("", nil, otest.WithHTTPCollectorRespondingPlainText())
+	require.NoError(t, err)
+
+	addr := coll.Addr().String()
+	opts := []Option{WithEndpoint(addr), WithInsecure()}
+	cfg := oconf.NewHTTPConfig(asHTTPOptions(opts)...)
+	client, err := newClient(cfg)
+	require.NoError(t, err)
+
+	require.NoError(t, client.UploadMetrics(ctx, &mpb.ResourceMetrics{}))
+	require.NoError(t, client.Shutdown(ctx))
+	got := coll.Collect().Dump()
+	require.Len(t, got, 1, "upload of one ResourceMetrics")
+}
+
+func TestNewWithInvalidEndpoint(t *testing.T) {
+	ctx := context.Background()
+	exp, err := New(ctx, WithEndpoint("host:invalid-port"))
+	assert.Error(t, err)
+	assert.Nil(t, exp)
+}
+
 func TestConfig(t *testing.T) {
 	factoryFunc := func(ePt string, rCh <-chan otest.ExportResult, o ...Option) (metric.Exporter, *otest.HTTPCollector) {
 		coll, err := otest.NewHTTPCollector(ePt, rCh)
@@ -91,7 +118,7 @@ func TestConfig(t *testing.T) {
 		require.NoError(t, exp.Shutdown(ctx))
 
 		got := coll.Headers()
-		require.Regexp(t, "OTel OTLP Exporter Go/[01]\\..*", got)
+		require.Regexp(t, "OTel Go OTLP over HTTP/protobuf metrics exporter/[01]\\..*", got)
 		require.Contains(t, got, key)
 		assert.Equal(t, got[key], []string{headers[key]})
 	})
@@ -111,7 +138,7 @@ func TestConfig(t *testing.T) {
 		t.Cleanup(func() { close(rCh) })
 		t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) })
 		err := exp.Export(ctx, &metricdata.ResourceMetrics{})
-		assert.ErrorContains(t, err, context.DeadlineExceeded.Error())
+		assert.ErrorAs(t, err, new(retryableError))
 	})
 
 	t.Run("WithCompressionGZip", func(t *testing.T) {
@@ -125,9 +152,9 @@ func TestConfig(t *testing.T) {
 
 	t.Run("WithRetry", func(t *testing.T) {
 		emptyErr := errors.New("")
-		rCh := make(chan otest.ExportResult, 3)
+		rCh := make(chan otest.ExportResult, 5)
 		header := http.Header{http.CanonicalHeaderKey("Retry-After"): {"10"}}
-		// Both retryable errors.
+		// All retryable errors.
 		rCh <- otest.ExportResult{Err: &otest.HTTPResponseError{
 			Status: http.StatusServiceUnavailable,
 			Err:    emptyErr,
@@ -137,6 +164,14 @@ func TestConfig(t *testing.T) {
 			Status: http.StatusTooManyRequests,
 			Err:    emptyErr,
 		}}
+		rCh <- otest.ExportResult{Err: &otest.HTTPResponseError{
+			Status: http.StatusGatewayTimeout,
+			Err:    emptyErr,
+		}}
+		rCh <- otest.ExportResult{Err: &otest.HTTPResponseError{
+			Status: http.StatusBadGateway,
+			Err:    emptyErr,
+		}}
 		rCh <- otest.ExportResult{}
 		exp, coll := factoryFunc("", rCh, WithRetry(RetryConfig{
 			Enabled:         true,
@@ -164,17 +199,6 @@ func TestConfig(t *testing.T) {
 		assert.Len(t, coll.Collect().Dump(), 1)
 	})
 
-	t.Run("WithURLPath", func(t *testing.T) {
-		path := "/prefix/v2/metrics"
-		ePt := fmt.Sprintf("http://localhost:0%s", path)
-		exp, coll := factoryFunc(ePt, nil, WithURLPath(path))
-		ctx := context.Background()
-		t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) })
-		t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) })
-		assert.NoError(t, exp.Export(ctx, &metricdata.ResourceMetrics{}))
-		assert.Len(t, coll.Collect().Dump(), 1)
-	})
-
 	t.Run("WithTLSClientConfig", func(t *testing.T) {
 		ePt := "https://localhost:0"
 		tlsCfg := &tls.Config{InsecureSkipVerify: true}
diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go b/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go
index a49e2465171..94f8b250d3f 100644
--- a/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go
+++ b/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go
@@ -12,7 +12,82 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package otlpmetrichttp provides an otlpmetric.Exporter that communicates
-// with an OTLP receiving endpoint using protobuf encoded metric data over
-// HTTP.
+/*
+Package otlpmetrichttp provides an OTLP metrics exporter using HTTP with protobuf payloads.
+By default the telemetry is sent to https://localhost:4318/v1/metrics.
+
+Exporter should be created using [New] and used with a [metric.PeriodicReader].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT (default: "https://localhost:4318") -
+target base URL ("/v1/metrics" is appended) to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+environment variable and by [WithEndpoint], [WithInsecure] options.
+
+OTEL_EXPORTER_OTLP_METRICS_ENDPOINT (default: "https://localhost:4318/v1/metrics") -
+target URL to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by [WithEndpoint], [WithInsecure], [WithURLPath] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS (default: none) -
+key-value pairs used as headers associated with HTTP requests.
+The value is expected to be represented in a format matching to the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_METRICS_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_METRICS_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION (default: none) -
+compression strategy the exporter uses to compress the HTTP body.
+Supported values: "gzip".
+OTEL_EXPORTER_OTLP_METRICS_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompression] option.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE (default: none) -
+filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE (default: none) -
+filepath to the client certificate/chain trust for clients private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY (default: none) -
+filepath to the clients private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE (default: "cumulative") -
+aggregation temporality to use on the basis of instrument kind. Supported values:
+  - "cumulative" - Cumulative aggregation temporality for all instrument kinds,
+  - "delta" - Delta aggregation temporality for Counter, Asynchronous Counter and Histogram instrument kinds;
+    Cumulative aggregation for UpDownCounter and Asynchronous UpDownCounter instrument kinds,
+  - "lowmemory" - Delta aggregation temporality for Synchronous Counter and Histogram instrument kinds;
+    Cumulative aggregation temporality for Synchronous UpDownCounter, Asynchronous Counter, and Asynchronous UpDownCounter instrument kinds.
+
+The configuration can be overridden by [WithTemporalitySelector] option.
+
+OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION (default: "explicit_bucket_histogram") -
+default aggregation to use for histogram instruments. Supported values:
+  - "explicit_bucket_histogram" - [Explicit Bucket Histogram Aggregation],
+  - "base2_exponential_bucket_histogram" - [Base2 Exponential Bucket Histogram Aggregation].
+
+The configuration can be overridden by [WithAggregationSelector] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+[Explicit Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#explicit-bucket-histogram-aggregation
+[Base2 Exponential Bucket Histogram Aggregation]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.26.0/specification/metrics/sdk.md#base2-exponential-bucket-histogram-aggregation
+*/
 package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod b/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod
index 9c6c5c1e784..812e7a78fde 100644
--- a/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod
+++ b/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod
@@ -1,36 +1,37 @@
 module go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
 
-go 1.19
+go 1.20
 
 retract v0.32.2 // Contains unresolvable dependencies.
 
 require (
 	github.com/cenkalti/backoff/v4 v4.2.1
-	github.com/google/go-cmp v0.5.9
+	github.com/google/go-cmp v0.6.0
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.39.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/sdk/metric v0.39.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/sdk/metric v1.21.0
 	go.opentelemetry.io/proto/otlp v1.0.0
-	google.golang.org/grpc v1.57.0
+	google.golang.org/grpc v1.59.0
 	google.golang.org/protobuf v1.31.0
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
+	github.com/kr/text v0.2.0 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	go.opentelemetry.io/otel/trace v1.16.0 // indirect
-	golang.org/x/net v0.10.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
-	golang.org/x/text v0.9.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
+	github.com/rogpeppe/go-internal v1.11.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	go.opentelemetry.io/otel/trace v1.21.0 // indirect
+	golang.org/x/net v0.17.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
+	golang.org/x/text v0.13.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
@@ -40,10 +41,6 @@ replace go.opentelemetry.io/otel/sdk => ../../../../sdk
 
 replace go.opentelemetry.io/otel/sdk/metric => ../../../../sdk/metric
 
-replace go.opentelemetry.io/otel/exporters/otlp/otlpmetric => ../
-
 replace go.opentelemetry.io/otel/metric => ../../../../metric
 
 replace go.opentelemetry.io/otel/trace => ../../../../trace
-
-replace go.opentelemetry.io/otel/exporters/otlp/internal/retry => ../../internal/retry
diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum b/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum
index 9397989cf3d..b3410b5f2ff 100644
--- a/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum
+++ b/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum
@@ -1,44 +1,47 @@
 github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
 github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
+github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
 go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
-golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e h1:Ao9GzfUMPH3zjVfzXG5rlWlk+Q8MXWKwWpwVQE1MXfw=
-google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM=
-google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
-google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
-google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
+google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
+google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go
index 38859fb9342..9dfb55c41bb 100644
--- a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go
+++ b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig.go
@@ -174,13 +174,13 @@ func stringToHeader(value string) map[string]string {
 			global.Error(errors.New("missing '="), "parse headers", "input", header)
 			continue
 		}
-		name, err := url.QueryUnescape(n)
+		name, err := url.PathUnescape(n)
 		if err != nil {
 			global.Error(err, "escape header key", "key", n)
 			continue
 		}
 		trimmedName := strings.TrimSpace(name)
-		value, err := url.QueryUnescape(v)
+		value, err := url.PathUnescape(v)
 		if err != nil {
 			global.Error(err, "escape header value", "value", v)
 			continue
diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig_test.go b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig_test.go
index cec506208d5..6cbe0c7ab11 100644
--- a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig_test.go
+++ b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/envconfig/envconfig_test.go
@@ -427,7 +427,12 @@ func TestStringToHeader(t *testing.T) {
 			want:  map[string]string{"userId": "alice"},
 		},
 		{
-			name:  "multiples headers encoded",
+			name:  "simple header conforms to RFC 3986 spec",
+			value: " userId = alice+test ",
+			want:  map[string]string{"userId": "alice+test"},
+		},
+		{
+			name:  "multiple headers encoded",
 			value: "userId=alice,serverNode=DF%3A28,isProduction=false",
 			want: map[string]string{
 				"userId":       "alice",
@@ -435,6 +440,16 @@ func TestStringToHeader(t *testing.T) {
 				"isProduction": "false",
 			},
 		},
+		{
+			name:  "multiple headers encoded per RFC 3986 spec",
+			value: "userId=alice+test,serverNode=DF%3A28,isProduction=false,namespace=localhost/test",
+			want: map[string]string{
+				"userId":       "alice+test",
+				"serverNode":   "DF:28",
+				"isProduction": "false",
+				"namespace":    "localhost/test",
+			},
+		},
 		{
 			name:  "invalid headers format",
 			value: "userId:alice",
diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go
index c1ec5ed210a..59468b9a5ed 100644
--- a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go
+++ b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options.go
@@ -30,7 +30,6 @@ import (
 	"google.golang.org/grpc/credentials/insecure"
 	"google.golang.org/grpc/encoding/gzip"
 
-	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
 	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal/retry"
 	"go.opentelemetry.io/otel/sdk/metric"
 )
@@ -122,7 +121,6 @@ func cleanPath(urlPath string, defaultPath string) string {
 // NewGRPCConfig returns a new Config with all settings applied from opts and
 // any unset setting using the default gRPC config values.
 func NewGRPCConfig(opts ...GRPCOption) Config {
-	userAgent := "OTel OTLP Exporter Go/" + otlpmetric.Version()
 	cfg := Config{
 		Metrics: SignalConfig{
 			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
@@ -134,7 +132,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
 			AggregationSelector: metric.DefaultAggregationSelector,
 		},
 		RetryConfig: retry.DefaultConfig,
-		DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
 	}
 	cfg = ApplyGRPCEnvConfigs(cfg)
 	for _, opt := range opts {
@@ -158,9 +155,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
 	if cfg.Metrics.Compression == GzipCompression {
 		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
 	}
-	if len(cfg.DialOptions) != 0 {
-		cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
-	}
 	if cfg.ReconnectionPeriod != 0 {
 		p := grpc.ConnectParams{
 			Backoff:           backoff.DefaultConfig,
diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options_test.go b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options_test.go
index bb34337a1f2..1da4a6f2f63 100644
--- a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options_test.go
+++ b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/oconf/options_test.go
@@ -203,7 +203,7 @@ func TestConfigs(t *testing.T) {
 			},
 			asserts: func(t *testing.T, c *Config, grpcOption bool) {
 				if grpcOption {
-					//TODO: make sure gRPC's credentials actually works
+					// TODO: make sure gRPC's credentials actually works
 					assert.NotNil(t, c.Metrics.GRPCCredentials)
 				} else {
 					// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/client.go b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/client.go
index 4169c9d964d..41374b956a4 100644
--- a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/client.go
+++ b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/client.go
@@ -38,7 +38,7 @@ import (
 
 var (
 	// Sat Jan 01 2000 00:00:00 GMT+0000.
-	start = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0))
+	start = time.Date(2000, time.January, 0o1, 0, 0, 0, 0, time.FixedZone("GMT", 0))
 	end   = start.Add(30 * time.Second)
 
 	kvAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/collector.go b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/collector.go
index 0b6b9387167..6398f8ba5ba 100644
--- a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/collector.go
+++ b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/otest/collector.go
@@ -195,6 +195,8 @@ func (e *HTTPResponseError) Unwrap() error { return e.Err }
 
 // HTTPCollector is an OTLP HTTP server that collects all requests it receives.
 type HTTPCollector struct {
+	plainTextResponse bool
+
 	headersMu sync.Mutex
 	headers   http.Header
 	storage   *Storage
@@ -217,7 +219,7 @@ type HTTPCollector struct {
 // If errCh is not nil, the collector will respond to HTTP requests with errors
 // sent on that channel. This means that if errCh is not nil Export calls will
 // block until an error is received.
-func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult) (*HTTPCollector, error) {
+func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult, opts ...func(*HTTPCollector)) (*HTTPCollector, error) {
 	u, err := url.Parse(endpoint)
 	if err != nil {
 		return nil, err
@@ -234,6 +236,9 @@ func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult) (*HTTPColle
 		storage:  NewStorage(),
 		resultCh: resultCh,
 	}
+	for _, opt := range opts {
+		opt(c)
+	}
 
 	c.listener, err = net.Listen("tcp", u.Host)
 	if err != nil {
@@ -242,7 +247,11 @@ func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult) (*HTTPColle
 
 	mux := http.NewServeMux()
 	mux.Handle(u.Path, http.HandlerFunc(c.handler))
-	c.srv = &http.Server{Handler: mux}
+	c.srv = &http.Server{
+		Handler:      mux,
+		ReadTimeout:  10 * time.Second,
+		WriteTimeout: 10 * time.Second,
+	}
 	if u.Scheme == "https" {
 		cert, err := weakCertificate()
 		if err != nil {
@@ -258,6 +267,14 @@ func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult) (*HTTPColle
 	return c, nil
 }
 
+// WithHTTPCollectorRespondingPlainText makes the HTTPCollector return
+// a plaintext, instead of protobuf, response.
+func WithHTTPCollectorRespondingPlainText() func(*HTTPCollector) {
+	return func(s *HTTPCollector) {
+		s.plainTextResponse = true
+	}
+}
+
 // Shutdown shuts down the HTTP server closing all open connections and
 // listeners.
 func (c *HTTPCollector) Shutdown(ctx context.Context) error {
@@ -378,6 +395,13 @@ func (c *HTTPCollector) respond(w http.ResponseWriter, resp ExportResult) {
 		return
 	}
 
+	if c.plainTextResponse {
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		w.WriteHeader(http.StatusOK)
+		_, _ = w.Write([]byte("OK"))
+		return
+	}
+
 	w.Header().Set("Content-Type", "application/x-protobuf")
 	w.WriteHeader(http.StatusOK)
 	if resp.Response == nil {
diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata_test.go b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata_test.go
index b94c48dae8d..676e5785633 100644
--- a/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata_test.go
+++ b/exporters/otlp/otlpmetric/otlpmetrichttp/internal/transform/metricdata_test.go
@@ -40,7 +40,7 @@ type unknownAggT struct {
 
 var (
 	// Sat Jan 01 2000 00:00:00 GMT+0000.
-	start = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0))
+	start = time.Date(2000, time.January, 0o1, 0, 0, 0, 0, time.FixedZone("GMT", 0))
 	end   = start.Add(30 * time.Second)
 
 	alice = attribute.NewSet(attribute.String("user", "alice"))
diff --git a/exporters/otlp/otlpmetric/internal/header_test.go b/exporters/otlp/otlpmetric/otlpmetrichttp/version.go
similarity index 70%
rename from exporters/otlp/otlpmetric/internal/header_test.go
rename to exporters/otlp/otlpmetric/otlpmetrichttp/version.go
index 32fc4952970..59d7c1c2cb9 100644
--- a/exporters/otlp/otlpmetric/internal/header_test.go
+++ b/exporters/otlp/otlpmetric/otlpmetrichttp/version.go
@@ -12,14 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package internal
+package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
 
-import (
-	"testing"
-
-	"github.com/stretchr/testify/require"
-)
-
-func TestGetUserAgentHeader(t *testing.T) {
-	require.Regexp(t, "OTel OTLP Exporter Go/[01]\\..*", GetUserAgentHeader())
+// Version is the current release version of the OpenTelemetry OTLP over HTTP/protobuf metrics exporter in use.
+func Version() string {
+	return "0.44.0"
 }
diff --git a/exporters/jaeger/internal/matchers/expecter.go b/exporters/otlp/otlpmetric/otlpmetrichttp/version_test.go
similarity index 58%
rename from exporters/jaeger/internal/matchers/expecter.go
rename to exporters/otlp/otlpmetric/otlpmetrichttp/version_test.go
index 54f331025d8..73091976fcc 100644
--- a/exporters/jaeger/internal/matchers/expecter.go
+++ b/exporters/otlp/otlpmetric/otlpmetrichttp/version_test.go
@@ -1,6 +1,3 @@
-// Code created by gotmpl. DO NOT MODIFY.
-// source: internal/shared/matchers/expecter.go.tmpl
-
 // Copyright The OpenTelemetry Authors
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,25 +12,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package matchers // import "go.opentelemetry.io/otel/exporters/jaeger/internal/matchers"
+package otlpmetrichttp
 
 import (
+	"regexp"
 	"testing"
-)
 
-type Expecter struct {
-	t *testing.T
-}
+	"github.com/stretchr/testify/assert"
+)
 
-func NewExpecter(t *testing.T) *Expecter {
-	return &Expecter{
-		t: t,
-	}
-}
+// regex taken from https://github.com/Masterminds/semver/tree/v3.1.1
+var versionRegex = regexp.MustCompile(`^v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
+	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$`)
 
-func (a *Expecter) Expect(actual interface{}) *Expectation {
-	return &Expectation{
-		t:      a.t,
-		actual: actual,
-	}
+func TestVersionSemver(t *testing.T) {
+	v := Version()
+	assert.NotNil(t, versionRegex.FindStringSubmatch(v), "version is not semver: %s", v)
 }
diff --git a/exporters/otlp/otlptrace/README.md b/exporters/otlp/otlptrace/README.md
deleted file mode 100644
index 50295223182..00000000000
--- a/exporters/otlp/otlptrace/README.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# OpenTelemetry-Go OTLP Span Exporter
-
-[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
-
-[OpenTelemetry Protocol Exporter](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/protocol/exporter.md) implementation.
-
-## Installation
-
-```
-go get -u go.opentelemetry.io/otel/exporters/otlp/otlptrace
-```
-
-## Examples
-
-- [HTTP Exporter setup and examples](./otlptracehttp/example_test.go)
-- [Full example of gRPC Exporter sending telemetry to a local collector](../../../example/otel-collector)
-
-## [`otlptrace`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
-
-The `otlptrace` package provides an exporter implementing the OTel span exporter interface.
-This exporter is configured using a client satisfying the `otlptrace.Client` interface.
-This client handles the transformation of data into wire format and the transmission of that data to the collector.
-
-## [`otlptracegrpc`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)
-
-The `otlptracegrpc` package implements a client for the span exporter that sends trace telemetry data to the collector using gRPC with protobuf-encoded payloads.
-
-## [`otlptracehttp`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp)
-
-The `otlptracehttp` package implements a client for the span exporter that sends trace telemetry data to the collector using HTTP with protobuf-encoded payloads.
-
-## Configuration
-
-### Environment Variables
-
-The following environment variables can be used (instead of options objects) to
-override the default configuration. For more information about how each of
-these environment variables is interpreted, see [the OpenTelemetry
-specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/protocol/exporter.md).
-
-| Environment variable                                                     | Option                        | Default value                                            |
-| ------------------------------------------------------------------------ |------------------------------ | -------------------------------------------------------- |
-| `OTEL_EXPORTER_OTLP_ENDPOINT` `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`       | `WithEndpoint` `WithInsecure` | `https://localhost:4317` or `https://localhost:4318`[^1] |
-| `OTEL_EXPORTER_OTLP_CERTIFICATE` `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` | `WithTLSClientConfig`         |                                                          |
-| `OTEL_EXPORTER_OTLP_HEADERS` `OTEL_EXPORTER_OTLP_TRACES_HEADERS`         | `WithHeaders`                 |                                                          |
-| `OTEL_EXPORTER_OTLP_COMPRESSION` `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` | `WithCompression`             |                                                          |
-| `OTEL_EXPORTER_OTLP_TIMEOUT` `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`         | `WithTimeout`                 | `10s`                                                    |
-
-[^1]: The gRPC client defaults to `https://localhost:4317` and the HTTP client `https://localhost:4318`.
-
-Configuration using options have precedence over the environment variables.
diff --git a/exporters/otlp/internal/envconfig/doc.go b/exporters/otlp/otlptrace/doc.go
similarity index 63%
rename from exporters/otlp/internal/envconfig/doc.go
rename to exporters/otlp/otlptrace/doc.go
index 327794a5bea..9e642235ade 100644
--- a/exporters/otlp/internal/envconfig/doc.go
+++ b/exporters/otlp/otlptrace/doc.go
@@ -12,9 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package envconfig contains common functionality for all OTLP exporter
-// configuration.
-//
-// Deprecated: package envconfig exists for historical compatibility, it should
-// not be used.
-package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig"
+/*
+Package otlptrace contains abstractions for OTLP span exporters.
+See the official OTLP span exporter implementations:
+  - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc],
+  - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp].
+*/
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
diff --git a/exporters/otlp/otlptrace/exporter.go b/exporters/otlp/otlptrace/exporter.go
index 0dbe15555b3..b46a38d60ab 100644
--- a/exporters/otlp/otlptrace/exporter.go
+++ b/exporters/otlp/otlptrace/exporter.go
@@ -24,9 +24,7 @@ import (
 	tracesdk "go.opentelemetry.io/otel/sdk/trace"
 )
 
-var (
-	errAlreadyStarted = errors.New("already started")
-)
+var errAlreadyStarted = errors.New("already started")
 
 // Exporter exports trace data in the OTLP wire format.
 type Exporter struct {
@@ -55,7 +53,7 @@ func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan)
 
 // Start establishes a connection to the receiving endpoint.
 func (e *Exporter) Start(ctx context.Context) error {
-	var err = errAlreadyStarted
+	err := errAlreadyStarted
 	e.startOnce.Do(func() {
 		e.mu.Lock()
 		e.started = true
diff --git a/exporters/otlp/otlptrace/go.mod b/exporters/otlp/otlptrace/go.mod
index 9b9d3815751..8b62d053be6 100644
--- a/exporters/otlp/otlptrace/go.mod
+++ b/exporters/otlp/otlptrace/go.mod
@@ -1,34 +1,27 @@
 module go.opentelemetry.io/otel/exporters/otlp/otlptrace
 
-go 1.19
+go 1.20
 
 require (
-	github.com/cenkalti/backoff/v4 v4.2.1
-	github.com/google/go-cmp v0.5.9
+	github.com/google/go-cmp v0.6.0
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/trace v1.16.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/trace v1.21.0
 	go.opentelemetry.io/proto/otlp v1.0.0
-	google.golang.org/grpc v1.57.0
 	google.golang.org/protobuf v1.31.0
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
-	github.com/kr/text v0.2.0 // indirect
+	github.com/kr/pretty v0.3.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/rogpeppe/go-internal v1.10.0 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	golang.org/x/net v0.10.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
-	golang.org/x/text v0.9.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
+	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
diff --git a/exporters/otlp/otlptrace/go.sum b/exporters/otlp/otlptrace/go.sum
index f6d29fdda91..6cc7fe2eb38 100644
--- a/exporters/otlp/otlptrace/go.sum
+++ b/exporters/otlp/otlptrace/go.sum
@@ -1,52 +1,40 @@
-github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
-github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
 github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
 go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
-golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e h1:Ao9GzfUMPH3zjVfzXG5rlWlk+Q8MXWKwWpwVQE1MXfw=
-google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM=
-google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
-google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
-google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
 google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/exporters/otlp/otlptrace/internal/doc.go b/exporters/otlp/otlptrace/internal/doc.go
deleted file mode 100644
index d1c019d8bb2..00000000000
--- a/exporters/otlp/otlptrace/internal/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package internal contains common functionality for all OTLP trace exporters.
-//
-// Deprecated: package internal exists for historical compatibility, it should
-// not be used.
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal"
diff --git a/exporters/otlp/otlptrace/internal/envconfig/envconfig.go b/exporters/otlp/otlptrace/internal/envconfig/envconfig.go
deleted file mode 100644
index 04d87af9e59..00000000000
--- a/exporters/otlp/otlptrace/internal/envconfig/envconfig.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig"
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"errors"
-	"fmt"
-	"net/url"
-	"strconv"
-	"strings"
-	"time"
-
-	"go.opentelemetry.io/otel/internal/global"
-)
-
-// ConfigFn is the generic function used to set a config.
-type ConfigFn func(*EnvOptionsReader)
-
-// EnvOptionsReader reads the required environment variables.
-type EnvOptionsReader struct {
-	GetEnv    func(string) string
-	ReadFile  func(string) ([]byte, error)
-	Namespace string
-}
-
-// Apply runs every ConfigFn.
-func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
-	for _, o := range opts {
-		o(e)
-	}
-}
-
-// GetEnvValue gets an OTLP environment variable value of the specified key
-// using the GetEnv function.
-// This function prepends the OTLP specified namespace to all key lookups.
-func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
-	v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
-	return v, v != ""
-}
-
-// WithString retrieves the specified config and passes it to ConfigFn as a string.
-func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
-	return func(e *EnvOptionsReader) {
-		if v, ok := e.GetEnvValue(n); ok {
-			fn(v)
-		}
-	}
-}
-
-// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
-func WithBool(n string, fn func(bool)) ConfigFn {
-	return func(e *EnvOptionsReader) {
-		if v, ok := e.GetEnvValue(n); ok {
-			b := strings.ToLower(v) == "true"
-			fn(b)
-		}
-	}
-}
-
-// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
-func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
-	return func(e *EnvOptionsReader) {
-		if v, ok := e.GetEnvValue(n); ok {
-			d, err := strconv.Atoi(v)
-			if err != nil {
-				global.Error(err, "parse duration", "input", v)
-				return
-			}
-			fn(time.Duration(d) * time.Millisecond)
-		}
-	}
-}
-
-// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
-func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
-	return func(e *EnvOptionsReader) {
-		if v, ok := e.GetEnvValue(n); ok {
-			fn(stringToHeader(v))
-		}
-	}
-}
-
-// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
-func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
-	return func(e *EnvOptionsReader) {
-		if v, ok := e.GetEnvValue(n); ok {
-			u, err := url.Parse(v)
-			if err != nil {
-				global.Error(err, "parse url", "input", v)
-				return
-			}
-			fn(u)
-		}
-	}
-}
-
-// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
-func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
-	return func(e *EnvOptionsReader) {
-		if v, ok := e.GetEnvValue(n); ok {
-			b, err := e.ReadFile(v)
-			if err != nil {
-				global.Error(err, "read tls ca cert file", "file", v)
-				return
-			}
-			c, err := createCertPool(b)
-			if err != nil {
-				global.Error(err, "create tls cert pool")
-				return
-			}
-			fn(c)
-		}
-	}
-}
-
-// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn.
-func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
-	return func(e *EnvOptionsReader) {
-		vc, okc := e.GetEnvValue(nc)
-		vk, okk := e.GetEnvValue(nk)
-		if !okc || !okk {
-			return
-		}
-		cert, err := e.ReadFile(vc)
-		if err != nil {
-			global.Error(err, "read tls client cert", "file", vc)
-			return
-		}
-		key, err := e.ReadFile(vk)
-		if err != nil {
-			global.Error(err, "read tls client key", "file", vk)
-			return
-		}
-		crt, err := tls.X509KeyPair(cert, key)
-		if err != nil {
-			global.Error(err, "create tls client key pair")
-			return
-		}
-		fn(crt)
-	}
-}
-
-func keyWithNamespace(ns, key string) string {
-	if ns == "" {
-		return key
-	}
-	return fmt.Sprintf("%s_%s", ns, key)
-}
-
-func stringToHeader(value string) map[string]string {
-	headersPairs := strings.Split(value, ",")
-	headers := make(map[string]string)
-
-	for _, header := range headersPairs {
-		n, v, found := strings.Cut(header, "=")
-		if !found {
-			global.Error(errors.New("missing '="), "parse headers", "input", header)
-			continue
-		}
-		name, err := url.QueryUnescape(n)
-		if err != nil {
-			global.Error(err, "escape header key", "key", n)
-			continue
-		}
-		trimmedName := strings.TrimSpace(name)
-		value, err := url.QueryUnescape(v)
-		if err != nil {
-			global.Error(err, "escape header value", "value", v)
-			continue
-		}
-		trimmedValue := strings.TrimSpace(value)
-
-		headers[trimmedName] = trimmedValue
-	}
-
-	return headers
-}
-
-func createCertPool(certBytes []byte) (*x509.CertPool, error) {
-	cp := x509.NewCertPool()
-	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
-		return nil, errors.New("failed to append certificate to the cert pool")
-	}
-	return cp, nil
-}
diff --git a/exporters/otlp/otlptrace/internal/envconfig/envconfig_test.go b/exporters/otlp/otlptrace/internal/envconfig/envconfig_test.go
deleted file mode 100644
index 0c959a1f3fd..00000000000
--- a/exporters/otlp/otlptrace/internal/envconfig/envconfig_test.go
+++ /dev/null
@@ -1,461 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package envconfig
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"errors"
-	"net/url"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-)
-
-const WeakKey = `
------BEGIN EC PRIVATE KEY-----
-MHcCAQEEIEbrSPmnlSOXvVzxCyv+VR3a0HDeUTvOcqrdssZ2k4gFoAoGCCqGSM49
-AwEHoUQDQgAEDMTfv75J315C3K9faptS9iythKOMEeV/Eep73nWX531YAkmmwBSB
-2dXRD/brsgLnfG57WEpxZuY7dPRbxu33BA==
------END EC PRIVATE KEY-----
-`
-
-const WeakCertificate = `
------BEGIN CERTIFICATE-----
-MIIBjjCCATWgAwIBAgIUKQSMC66MUw+kPp954ZYOcyKAQDswCgYIKoZIzj0EAwIw
-EjEQMA4GA1UECgwHb3RlbC1nbzAeFw0yMjEwMTkwMDA5MTlaFw0yMzEwMTkwMDA5
-MTlaMBIxEDAOBgNVBAoMB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
-AAQMxN+/vknfXkLcr19qm1L2LK2Eo4wR5X8R6nvedZfnfVgCSabAFIHZ1dEP9uuy
-Aud8bntYSnFm5jt09FvG7fcEo2kwZzAdBgNVHQ4EFgQUicGuhnTTkYLZwofXMNLK
-SHFeCWgwHwYDVR0jBBgwFoAUicGuhnTTkYLZwofXMNLKSHFeCWgwDwYDVR0TAQH/
-BAUwAwEB/zAUBgNVHREEDTALgglsb2NhbGhvc3QwCgYIKoZIzj0EAwIDRwAwRAIg
-Lfma8FnnxeSOi6223AsFfYwsNZ2RderNsQrS0PjEHb0CIBkrWacqARUAu7uT4cGu
-jVcIxYQqhId5L8p/mAv2PWZS
------END CERTIFICATE-----
-`
-
-type testOption struct {
-	TestString   string
-	TestBool     bool
-	TestDuration time.Duration
-	TestHeaders  map[string]string
-	TestURL      *url.URL
-	TestTLS      *tls.Config
-}
-
-func TestEnvConfig(t *testing.T) {
-	parsedURL, err := url.Parse("https://example.com")
-	assert.NoError(t, err)
-
-	options := []testOption{}
-	for _, testcase := range []struct {
-		name            string
-		reader          EnvOptionsReader
-		configs         []ConfigFn
-		expectedOptions []testOption
-	}{
-		{
-			name: "with no namespace and a matching key",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "world"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithString("HELLO", func(v string) {
-					options = append(options, testOption{TestString: v})
-				}),
-			},
-			expectedOptions: []testOption{
-				{
-					TestString: "world",
-				},
-			},
-		},
-		{
-			name: "with no namespace and a non-matching key",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "world"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithString("HOLA", func(v string) {
-					options = append(options, testOption{TestString: v})
-				}),
-			},
-			expectedOptions: []testOption{},
-		},
-		{
-			name: "with a namespace and a matching key",
-			reader: EnvOptionsReader{
-				Namespace: "MY_NAMESPACE",
-				GetEnv: func(n string) string {
-					if n == "MY_NAMESPACE_HELLO" {
-						return "world"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithString("HELLO", func(v string) {
-					options = append(options, testOption{TestString: v})
-				}),
-			},
-			expectedOptions: []testOption{
-				{
-					TestString: "world",
-				},
-			},
-		},
-		{
-			name: "with no namespace and a non-matching key",
-			reader: EnvOptionsReader{
-				Namespace: "MY_NAMESPACE",
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "world"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithString("HELLO", func(v string) {
-					options = append(options, testOption{TestString: v})
-				}),
-			},
-			expectedOptions: []testOption{},
-		},
-		{
-			name: "with a bool config",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "true"
-					} else if n == "WORLD" {
-						return "false"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithBool("HELLO", func(b bool) {
-					options = append(options, testOption{TestBool: b})
-				}),
-				WithBool("WORLD", func(b bool) {
-					options = append(options, testOption{TestBool: b})
-				}),
-			},
-			expectedOptions: []testOption{
-				{
-					TestBool: true,
-				},
-				{
-					TestBool: false,
-				},
-			},
-		},
-		{
-			name: "with an invalid bool config",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "world"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithBool("HELLO", func(b bool) {
-					options = append(options, testOption{TestBool: b})
-				}),
-			},
-			expectedOptions: []testOption{
-				{
-					TestBool: false,
-				},
-			},
-		},
-		{
-			name: "with a duration config",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "60"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithDuration("HELLO", func(v time.Duration) {
-					options = append(options, testOption{TestDuration: v})
-				}),
-			},
-			expectedOptions: []testOption{
-				{
-					TestDuration: 60_000_000, // 60 milliseconds
-				},
-			},
-		},
-		{
-			name: "with an invalid duration config",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "world"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithDuration("HELLO", func(v time.Duration) {
-					options = append(options, testOption{TestDuration: v})
-				}),
-			},
-			expectedOptions: []testOption{},
-		},
-		{
-			name: "with headers",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "userId=42,userName=alice"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithHeaders("HELLO", func(v map[string]string) {
-					options = append(options, testOption{TestHeaders: v})
-				}),
-			},
-			expectedOptions: []testOption{
-				{
-					TestHeaders: map[string]string{
-						"userId":   "42",
-						"userName": "alice",
-					},
-				},
-			},
-		},
-		{
-			name: "with invalid headers",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "world"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithHeaders("HELLO", func(v map[string]string) {
-					options = append(options, testOption{TestHeaders: v})
-				}),
-			},
-			expectedOptions: []testOption{
-				{
-					TestHeaders: map[string]string{},
-				},
-			},
-		},
-		{
-			name: "with URL",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "https://example.com"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithURL("HELLO", func(v *url.URL) {
-					options = append(options, testOption{TestURL: v})
-				}),
-			},
-			expectedOptions: []testOption{
-				{
-					TestURL: parsedURL,
-				},
-			},
-		},
-		{
-			name: "with invalid URL",
-			reader: EnvOptionsReader{
-				GetEnv: func(n string) string {
-					if n == "HELLO" {
-						return "i nvalid://url"
-					}
-					return ""
-				},
-			},
-			configs: []ConfigFn{
-				WithURL("HELLO", func(v *url.URL) {
-					options = append(options, testOption{TestURL: v})
-				}),
-			},
-			expectedOptions: []testOption{},
-		},
-	} {
-		t.Run(testcase.name, func(t *testing.T) {
-			testcase.reader.Apply(testcase.configs...)
-			assert.Equal(t, testcase.expectedOptions, options)
-			options = []testOption{}
-		})
-	}
-}
-
-func TestWithTLSConfig(t *testing.T) {
-	pool, err := createCertPool([]byte(WeakCertificate))
-	assert.NoError(t, err)
-
-	reader := EnvOptionsReader{
-		GetEnv: func(n string) string {
-			if n == "CERTIFICATE" {
-				return "/path/cert.pem"
-			}
-			return ""
-		},
-		ReadFile: func(p string) ([]byte, error) {
-			if p == "/path/cert.pem" {
-				return []byte(WeakCertificate), nil
-			}
-			return []byte{}, nil
-		},
-	}
-
-	var option testOption
-	reader.Apply(
-		WithCertPool("CERTIFICATE", func(cp *x509.CertPool) {
-			option = testOption{TestTLS: &tls.Config{RootCAs: cp}}
-		}),
-	)
-
-	// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
-	assert.Equal(t, pool.Subjects(), option.TestTLS.RootCAs.Subjects())
-}
-
-func TestWithClientCert(t *testing.T) {
-	cert, err := tls.X509KeyPair([]byte(WeakCertificate), []byte(WeakKey))
-	assert.NoError(t, err)
-
-	reader := EnvOptionsReader{
-		GetEnv: func(n string) string {
-			switch n {
-			case "CLIENT_CERTIFICATE":
-				return "/path/tls.crt"
-			case "CLIENT_KEY":
-				return "/path/tls.key"
-			}
-			return ""
-		},
-		ReadFile: func(n string) ([]byte, error) {
-			switch n {
-			case "/path/tls.crt":
-				return []byte(WeakCertificate), nil
-			case "/path/tls.key":
-				return []byte(WeakKey), nil
-			}
-			return []byte{}, nil
-		},
-	}
-
-	var option testOption
-	reader.Apply(
-		WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) {
-			option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}}
-		}),
-	)
-	assert.Equal(t, cert, option.TestTLS.Certificates[0])
-
-	reader.ReadFile = func(s string) ([]byte, error) { return nil, errors.New("oops") }
-	option.TestTLS = nil
-	reader.Apply(
-		WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) {
-			option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}}
-		}),
-	)
-	assert.Nil(t, option.TestTLS)
-
-	reader.GetEnv = func(s string) string { return "" }
-	option.TestTLS = nil
-	reader.Apply(
-		WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) {
-			option = testOption{TestTLS: &tls.Config{Certificates: []tls.Certificate{c}}}
-		}),
-	)
-	assert.Nil(t, option.TestTLS)
-}
-
-func TestStringToHeader(t *testing.T) {
-	tests := []struct {
-		name  string
-		value string
-		want  map[string]string
-	}{
-		{
-			name:  "simple test",
-			value: "userId=alice",
-			want:  map[string]string{"userId": "alice"},
-		},
-		{
-			name:  "simple test with spaces",
-			value: " userId = alice  ",
-			want:  map[string]string{"userId": "alice"},
-		},
-		{
-			name:  "multiples headers encoded",
-			value: "userId=alice,serverNode=DF%3A28,isProduction=false",
-			want: map[string]string{
-				"userId":       "alice",
-				"serverNode":   "DF:28",
-				"isProduction": "false",
-			},
-		},
-		{
-			name:  "invalid headers format",
-			value: "userId:alice",
-			want:  map[string]string{},
-		},
-		{
-			name:  "invalid key",
-			value: "%XX=missing,userId=alice",
-			want: map[string]string{
-				"userId": "alice",
-			},
-		},
-		{
-			name:  "invalid value",
-			value: "missing=%XX,userId=alice",
-			want: map[string]string{
-				"userId": "alice",
-			},
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			assert.Equal(t, tt.want, stringToHeader(tt.value))
-		})
-	}
-}
diff --git a/exporters/otlp/otlptrace/internal/header.go b/exporters/otlp/otlptrace/internal/header.go
deleted file mode 100644
index 65694a9019a..00000000000
--- a/exporters/otlp/otlptrace/internal/header.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal"
-
-import (
-	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
-)
-
-// GetUserAgentHeader returns an OTLP header value form "OTel OTLP Exporter Go/{ .Version }"
-// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/protocol/exporter.md#user-agent
-func GetUserAgentHeader() string {
-	return "OTel OTLP Exporter Go/" + otlptrace.Version()
-}
diff --git a/exporters/otlp/otlptrace/internal/otlpconfig/doc.go b/exporters/otlp/otlptrace/internal/otlpconfig/doc.go
deleted file mode 100644
index 41047e4de95..00000000000
--- a/exporters/otlp/otlptrace/internal/otlpconfig/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package otlpconfig contains common functionality for configuring all OTLP
-// trace exporters.
-//
-// Deprecated: package otlpconfig exists for historical compatibility, it
-// should not be used.
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
diff --git a/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go b/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go
deleted file mode 100644
index 1b9ecf6f949..00000000000
--- a/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"net/url"
-	"os"
-	"path"
-	"strings"
-	"time"
-
-	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig" // nolint: staticcheck  // Atomic deprecation.
-)
-
-// DefaultEnvOptionsReader is the default environments reader.
-var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
-	GetEnv:    os.Getenv,
-	ReadFile:  os.ReadFile,
-	Namespace: "OTEL_EXPORTER_OTLP",
-}
-
-// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
-func ApplyGRPCEnvConfigs(cfg Config) Config {
-	opts := getOptionsFromEnv()
-	for _, opt := range opts {
-		cfg = opt.ApplyGRPCOption(cfg)
-	}
-	return cfg
-}
-
-// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
-func ApplyHTTPEnvConfigs(cfg Config) Config {
-	opts := getOptionsFromEnv()
-	for _, opt := range opts {
-		cfg = opt.ApplyHTTPOption(cfg)
-	}
-	return cfg
-}
-
-func getOptionsFromEnv() []GenericOption {
-	opts := []GenericOption{}
-
-	tlsConf := &tls.Config{}
-	DefaultEnvOptionsReader.Apply(
-		envconfig.WithURL("ENDPOINT", func(u *url.URL) {
-			opts = append(opts, withEndpointScheme(u))
-			opts = append(opts, newSplitOption(func(cfg Config) Config {
-				cfg.Traces.Endpoint = u.Host
-				// For OTLP/HTTP endpoint URLs without a per-signal
-				// configuration, the passed endpoint is used as a base URL
-				// and the signals are sent to these paths relative to that.
-				cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath)
-				return cfg
-			}, withEndpointForGRPC(u)))
-		}),
-		envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) {
-			opts = append(opts, withEndpointScheme(u))
-			opts = append(opts, newSplitOption(func(cfg Config) Config {
-				cfg.Traces.Endpoint = u.Host
-				// For endpoint URLs for OTLP/HTTP per-signal variables, the
-				// URL MUST be used as-is without any modification. The only
-				// exception is that if an URL contains no path part, the root
-				// path / MUST be used.
-				path := u.Path
-				if path == "" {
-					path = "/"
-				}
-				cfg.Traces.URLPath = path
-				return cfg
-			}, withEndpointForGRPC(u)))
-		}),
-		envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
-		envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
-		envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
-		envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
-		withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
-		envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
-		envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
-		envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
-		envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
-		WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
-		WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
-		envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
-		envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
-	)
-
-	return opts
-}
-
-func withEndpointScheme(u *url.URL) GenericOption {
-	switch strings.ToLower(u.Scheme) {
-	case "http", "unix":
-		return WithInsecure()
-	default:
-		return WithSecure()
-	}
-}
-
-func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
-	return func(cfg Config) Config {
-		// For OTLP/gRPC endpoints, this is the target to which the
-		// exporter is going to send telemetry.
-		cfg.Traces.Endpoint = path.Join(u.Host, u.Path)
-		return cfg
-	}
-}
-
-// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
-func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
-	return func(e *envconfig.EnvOptionsReader) {
-		if v, ok := e.GetEnvValue(n); ok {
-			cp := NoCompression
-			if v == "gzip" {
-				cp = GzipCompression
-			}
-
-			fn(cp)
-		}
-	}
-}
-
-// revive:disable-next-line:flag-parameter
-func withInsecure(b bool) GenericOption {
-	if b {
-		return WithInsecure()
-	}
-	return WithSecure()
-}
-
-func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
-	return func(e *envconfig.EnvOptionsReader) {
-		if c.RootCAs != nil || len(c.Certificates) > 0 {
-			fn(c)
-		}
-	}
-}
diff --git a/exporters/otlp/otlptrace/internal/otlpconfig/options.go b/exporters/otlp/otlptrace/internal/otlpconfig/options.go
deleted file mode 100644
index 9d99c02365d..00000000000
--- a/exporters/otlp/otlptrace/internal/otlpconfig/options.go
+++ /dev/null
@@ -1,325 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
-
-import (
-	"crypto/tls"
-	"fmt"
-	"path"
-	"strings"
-	"time"
-
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/backoff"
-	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/credentials/insecure"
-	"google.golang.org/grpc/encoding/gzip"
-
-	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
-	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry" // nolint: staticcheck  // Atomic deprecation.
-)
-
-const (
-	// DefaultTracesPath is a default URL path for endpoint that
-	// receives spans.
-	DefaultTracesPath string = "/v1/traces"
-	// DefaultTimeout is a default max waiting time for the backend to process
-	// each span batch.
-	DefaultTimeout time.Duration = 10 * time.Second
-)
-
-type (
-	SignalConfig struct {
-		Endpoint    string
-		Insecure    bool
-		TLSCfg      *tls.Config
-		Headers     map[string]string
-		Compression Compression
-		Timeout     time.Duration
-		URLPath     string
-
-		// gRPC configurations
-		GRPCCredentials credentials.TransportCredentials
-	}
-
-	Config struct {
-		// Signal specific configurations
-		Traces SignalConfig
-
-		RetryConfig retry.Config
-
-		// gRPC configurations
-		ReconnectionPeriod time.Duration
-		ServiceConfig      string
-		DialOptions        []grpc.DialOption
-		GRPCConn           *grpc.ClientConn
-	}
-)
-
-// NewHTTPConfig returns a new Config with all settings applied from opts and
-// any unset setting using the default HTTP config values.
-func NewHTTPConfig(opts ...HTTPOption) Config {
-	cfg := Config{
-		Traces: SignalConfig{
-			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
-			URLPath:     DefaultTracesPath,
-			Compression: NoCompression,
-			Timeout:     DefaultTimeout,
-		},
-		RetryConfig: retry.DefaultConfig,
-	}
-	cfg = ApplyHTTPEnvConfigs(cfg)
-	for _, opt := range opts {
-		cfg = opt.ApplyHTTPOption(cfg)
-	}
-	cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath)
-	return cfg
-}
-
-// cleanPath returns a path with all spaces trimmed and all redundancies
-// removed. If urlPath is empty or cleaning it results in an empty string,
-// defaultPath is returned instead.
-func cleanPath(urlPath string, defaultPath string) string {
-	tmp := path.Clean(strings.TrimSpace(urlPath))
-	if tmp == "." {
-		return defaultPath
-	}
-	if !path.IsAbs(tmp) {
-		tmp = fmt.Sprintf("/%s", tmp)
-	}
-	return tmp
-}
-
-// NewGRPCConfig returns a new Config with all settings applied from opts and
-// any unset setting using the default gRPC config values.
-func NewGRPCConfig(opts ...GRPCOption) Config {
-	userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version()
-	cfg := Config{
-		Traces: SignalConfig{
-			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
-			URLPath:     DefaultTracesPath,
-			Compression: NoCompression,
-			Timeout:     DefaultTimeout,
-		},
-		RetryConfig: retry.DefaultConfig,
-		DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
-	}
-	cfg = ApplyGRPCEnvConfigs(cfg)
-	for _, opt := range opts {
-		cfg = opt.ApplyGRPCOption(cfg)
-	}
-
-	if cfg.ServiceConfig != "" {
-		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
-	}
-	// Priroritize GRPCCredentials over Insecure (passing both is an error).
-	if cfg.Traces.GRPCCredentials != nil {
-		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
-	} else if cfg.Traces.Insecure {
-		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
-	} else {
-		// Default to using the host's root CA.
-		creds := credentials.NewTLS(nil)
-		cfg.Traces.GRPCCredentials = creds
-		cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
-	}
-	if cfg.Traces.Compression == GzipCompression {
-		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
-	}
-	if len(cfg.DialOptions) != 0 {
-		cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
-	}
-	if cfg.ReconnectionPeriod != 0 {
-		p := grpc.ConnectParams{
-			Backoff:           backoff.DefaultConfig,
-			MinConnectTimeout: cfg.ReconnectionPeriod,
-		}
-		cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
-	}
-
-	return cfg
-}
-
-type (
-	// GenericOption applies an option to the HTTP or gRPC driver.
-	GenericOption interface {
-		ApplyHTTPOption(Config) Config
-		ApplyGRPCOption(Config) Config
-
-		// A private method to prevent users implementing the
-		// interface and so future additions to it will not
-		// violate compatibility.
-		private()
-	}
-
-	// HTTPOption applies an option to the HTTP driver.
-	HTTPOption interface {
-		ApplyHTTPOption(Config) Config
-
-		// A private method to prevent users implementing the
-		// interface and so future additions to it will not
-		// violate compatibility.
-		private()
-	}
-
-	// GRPCOption applies an option to the gRPC driver.
-	GRPCOption interface {
-		ApplyGRPCOption(Config) Config
-
-		// A private method to prevent users implementing the
-		// interface and so future additions to it will not
-		// violate compatibility.
-		private()
-	}
-)
-
-// genericOption is an option that applies the same logic
-// for both gRPC and HTTP.
-type genericOption struct {
-	fn func(Config) Config
-}
-
-func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
-	return g.fn(cfg)
-}
-
-func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
-	return g.fn(cfg)
-}
-
-func (genericOption) private() {}
-
-func newGenericOption(fn func(cfg Config) Config) GenericOption {
-	return &genericOption{fn: fn}
-}
-
-// splitOption is an option that applies different logics
-// for gRPC and HTTP.
-type splitOption struct {
-	httpFn func(Config) Config
-	grpcFn func(Config) Config
-}
-
-func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
-	return g.grpcFn(cfg)
-}
-
-func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
-	return g.httpFn(cfg)
-}
-
-func (splitOption) private() {}
-
-func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
-	return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
-}
-
-// httpOption is an option that is only applied to the HTTP driver.
-type httpOption struct {
-	fn func(Config) Config
-}
-
-func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
-	return h.fn(cfg)
-}
-
-func (httpOption) private() {}
-
-func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
-	return &httpOption{fn: fn}
-}
-
-// grpcOption is an option that is only applied to the gRPC driver.
-type grpcOption struct {
-	fn func(Config) Config
-}
-
-func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
-	return h.fn(cfg)
-}
-
-func (grpcOption) private() {}
-
-func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
-	return &grpcOption{fn: fn}
-}
-
-// Generic Options
-
-func WithEndpoint(endpoint string) GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.Traces.Endpoint = endpoint
-		return cfg
-	})
-}
-
-func WithCompression(compression Compression) GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.Traces.Compression = compression
-		return cfg
-	})
-}
-
-func WithURLPath(urlPath string) GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.Traces.URLPath = urlPath
-		return cfg
-	})
-}
-
-func WithRetry(rc retry.Config) GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.RetryConfig = rc
-		return cfg
-	})
-}
-
-func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
-	return newSplitOption(func(cfg Config) Config {
-		cfg.Traces.TLSCfg = tlsCfg.Clone()
-		return cfg
-	}, func(cfg Config) Config {
-		cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
-		return cfg
-	})
-}
-
-func WithInsecure() GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.Traces.Insecure = true
-		return cfg
-	})
-}
-
-func WithSecure() GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.Traces.Insecure = false
-		return cfg
-	})
-}
-
-func WithHeaders(headers map[string]string) GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.Traces.Headers = headers
-		return cfg
-	})
-}
-
-func WithTimeout(duration time.Duration) GenericOption {
-	return newGenericOption(func(cfg Config) Config {
-		cfg.Traces.Timeout = duration
-		return cfg
-	})
-}
diff --git a/exporters/otlp/otlptrace/internal/otlpconfig/options_test.go b/exporters/otlp/otlptrace/internal/otlpconfig/options_test.go
deleted file mode 100644
index e3f83e64616..00000000000
--- a/exporters/otlp/otlptrace/internal/otlpconfig/options_test.go
+++ /dev/null
@@ -1,486 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otlpconfig
-
-import (
-	"errors"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-
-	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig" // nolint: staticcheck  // Atomic deprecation.
-)
-
-const (
-	WeakCertificate = `
------BEGIN CERTIFICATE-----
-MIIBhzCCASygAwIBAgIRANHpHgAWeTnLZpTSxCKs0ggwCgYIKoZIzj0EAwIwEjEQ
-MA4GA1UEChMHb3RlbC1nbzAeFw0yMTA0MDExMzU5MDNaFw0yMTA0MDExNDU5MDNa
-MBIxEDAOBgNVBAoTB290ZWwtZ28wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS9
-nWSkmPCxShxnp43F+PrOtbGV7sNfkbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0Z
-sJCLHGogQsYnWJBXUZOVo2MwYTAOBgNVHQ8BAf8EBAMCB4AwEwYDVR0lBAwwCgYI
-KwYBBQUHAwEwDAYDVR0TAQH/BAIwADAsBgNVHREEJTAjgglsb2NhbGhvc3SHEAAA
-AAAAAAAAAAAAAAAAAAGHBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhANwZVVKvfvQ/
-1HXsTvgH+xTQswOwSSKYJ1cVHQhqK7ZbAiEAus8NxpTRnp5DiTMuyVmhVNPB+bVH
-Lhnm4N/QDk5rek0=
------END CERTIFICATE-----
-`
-	WeakPrivateKey = `
------BEGIN PRIVATE KEY-----
-MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgN8HEXiXhvByrJ1zK
-SFT6Y2l2KqDWwWzKf+t4CyWrNKehRANCAAS9nWSkmPCxShxnp43F+PrOtbGV7sNf
-kbQ/kxzi9Ego0ZJdiXxkmv/C05QFddCW7Y0ZsJCLHGogQsYnWJBXUZOV
------END PRIVATE KEY-----
-`
-)
-
-type env map[string]string
-
-func (e *env) getEnv(env string) string {
-	return (*e)[env]
-}
-
-type fileReader map[string][]byte
-
-func (f *fileReader) readFile(filename string) ([]byte, error) {
-	if b, ok := (*f)[filename]; ok {
-		return b, nil
-	}
-	return nil, errors.New("file not found")
-}
-
-func TestConfigs(t *testing.T) {
-	tlsCert, err := CreateTLSConfig([]byte(WeakCertificate))
-	assert.NoError(t, err)
-
-	tests := []struct {
-		name       string
-		opts       []GenericOption
-		env        env
-		fileReader fileReader
-		asserts    func(t *testing.T, c *Config, grpcOption bool)
-	}{
-		{
-			name: "Test default configs",
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				if grpcOption {
-					assert.Equal(t, "localhost:4317", c.Traces.Endpoint)
-				} else {
-					assert.Equal(t, "localhost:4318", c.Traces.Endpoint)
-				}
-				assert.Equal(t, NoCompression, c.Traces.Compression)
-				assert.Equal(t, map[string]string(nil), c.Traces.Headers)
-				assert.Equal(t, 10*time.Second, c.Traces.Timeout)
-			},
-		},
-
-		// Endpoint Tests
-		{
-			name: "Test With Endpoint",
-			opts: []GenericOption{
-				WithEndpoint("someendpoint"),
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, "someendpoint", c.Traces.Endpoint)
-			},
-		},
-		{
-			name: "Test Environment Endpoint",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_ENDPOINT": "https://env.endpoint/prefix",
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.False(t, c.Traces.Insecure)
-				if grpcOption {
-					assert.Equal(t, "env.endpoint/prefix", c.Traces.Endpoint)
-				} else {
-					assert.Equal(t, "env.endpoint", c.Traces.Endpoint)
-					assert.Equal(t, "/prefix/v1/traces", c.Traces.URLPath)
-				}
-			},
-		},
-		{
-			name: "Test Environment Signal Specific Endpoint",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_ENDPOINT":        "https://overrode.by.signal.specific/env/var",
-				"OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": "http://env.traces.endpoint",
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.True(t, c.Traces.Insecure)
-				assert.Equal(t, "env.traces.endpoint", c.Traces.Endpoint)
-				if !grpcOption {
-					assert.Equal(t, "/", c.Traces.URLPath)
-				}
-			},
-		},
-		{
-			name: "Test Mixed Environment and With Endpoint",
-			opts: []GenericOption{
-				WithEndpoint("traces_endpoint"),
-			},
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_ENDPOINT": "env_endpoint",
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, "traces_endpoint", c.Traces.Endpoint)
-			},
-		},
-		{
-			name: "Test Environment Endpoint with HTTP scheme",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_ENDPOINT": "http://env_endpoint",
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, "env_endpoint", c.Traces.Endpoint)
-				assert.Equal(t, true, c.Traces.Insecure)
-			},
-		},
-		{
-			name: "Test Environment Endpoint with HTTP scheme and leading & trailingspaces",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_ENDPOINT": "      http://env_endpoint    ",
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, "env_endpoint", c.Traces.Endpoint)
-				assert.Equal(t, true, c.Traces.Insecure)
-			},
-		},
-		{
-			name: "Test Environment Endpoint with HTTPS scheme",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_ENDPOINT": "https://env_endpoint",
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, "env_endpoint", c.Traces.Endpoint)
-				assert.Equal(t, false, c.Traces.Insecure)
-			},
-		},
-		{
-			name: "Test Environment Signal Specific Endpoint with uppercase scheme",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_ENDPOINT":        "HTTPS://overrode_by_signal_specific",
-				"OTEL_EXPORTER_OTLP_TRACES_ENDPOINT": "HtTp://env_traces_endpoint",
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, "env_traces_endpoint", c.Traces.Endpoint)
-				assert.Equal(t, true, c.Traces.Insecure)
-			},
-		},
-
-		// Certificate tests
-		{
-			name: "Test Default Certificate",
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				if grpcOption {
-					assert.NotNil(t, c.Traces.GRPCCredentials)
-				} else {
-					assert.Nil(t, c.Traces.TLSCfg)
-				}
-			},
-		},
-		{
-			name: "Test With Certificate",
-			opts: []GenericOption{
-				WithTLSClientConfig(tlsCert),
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				if grpcOption {
-					//TODO: make sure gRPC's credentials actually works
-					assert.NotNil(t, c.Traces.GRPCCredentials)
-				} else {
-					// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
-					assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Traces.TLSCfg.RootCAs.Subjects())
-				}
-			},
-		},
-		{
-			name: "Test Environment Certificate",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path",
-			},
-			fileReader: fileReader{
-				"cert_path": []byte(WeakCertificate),
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				if grpcOption {
-					assert.NotNil(t, c.Traces.GRPCCredentials)
-				} else {
-					// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
-					assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Traces.TLSCfg.RootCAs.Subjects())
-				}
-			},
-		},
-		{
-			name: "Test Environment Signal Specific Certificate",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_CERTIFICATE":        "overrode_by_signal_specific",
-				"OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE": "cert_path",
-			},
-			fileReader: fileReader{
-				"cert_path":    []byte(WeakCertificate),
-				"invalid_cert": []byte("invalid certificate file."),
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				if grpcOption {
-					assert.NotNil(t, c.Traces.GRPCCredentials)
-				} else {
-					// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
-					assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Traces.TLSCfg.RootCAs.Subjects())
-				}
-			},
-		},
-		{
-			name: "Test Mixed Environment and With Certificate",
-			opts: []GenericOption{},
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path",
-			},
-			fileReader: fileReader{
-				"cert_path": []byte(WeakCertificate),
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				if grpcOption {
-					assert.NotNil(t, c.Traces.GRPCCredentials)
-				} else {
-					// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
-					assert.Equal(t, tlsCert.RootCAs.Subjects(), c.Traces.TLSCfg.RootCAs.Subjects())
-				}
-			},
-		},
-
-		// Headers tests
-		{
-			name: "Test With Headers",
-			opts: []GenericOption{
-				WithHeaders(map[string]string{"h1": "v1"}),
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, map[string]string{"h1": "v1"}, c.Traces.Headers)
-			},
-		},
-		{
-			name: "Test Environment Headers",
-			env:  map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Traces.Headers)
-			},
-		},
-		{
-			name: "Test Environment Signal Specific Headers",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_HEADERS":        "overrode_by_signal_specific",
-				"OTEL_EXPORTER_OTLP_TRACES_HEADERS": "h1=v1,h2=v2",
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Traces.Headers)
-			},
-		},
-		{
-			name: "Test Mixed Environment and With Headers",
-			env:  map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"},
-			opts: []GenericOption{},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Traces.Headers)
-			},
-		},
-
-		// Compression Tests
-		{
-			name: "Test With Compression",
-			opts: []GenericOption{
-				WithCompression(GzipCompression),
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, GzipCompression, c.Traces.Compression)
-			},
-		},
-		{
-			name: "Test Environment Compression",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_COMPRESSION": "gzip",
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, GzipCompression, c.Traces.Compression)
-			},
-		},
-		{
-			name: "Test Environment Signal Specific Compression",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_TRACES_COMPRESSION": "gzip",
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, GzipCompression, c.Traces.Compression)
-			},
-		},
-		{
-			name: "Test Mixed Environment and With Compression",
-			opts: []GenericOption{
-				WithCompression(NoCompression),
-			},
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_TRACES_COMPRESSION": "gzip",
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, NoCompression, c.Traces.Compression)
-			},
-		},
-
-		// Timeout Tests
-		{
-			name: "Test With Timeout",
-			opts: []GenericOption{
-				WithTimeout(time.Duration(5 * time.Second)),
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, 5*time.Second, c.Traces.Timeout)
-			},
-		},
-		{
-			name: "Test Environment Timeout",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_TIMEOUT": "15000",
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, c.Traces.Timeout, 15*time.Second)
-			},
-		},
-		{
-			name: "Test Environment Signal Specific Timeout",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_TIMEOUT":        "15000",
-				"OTEL_EXPORTER_OTLP_TRACES_TIMEOUT": "27000",
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, c.Traces.Timeout, 27*time.Second)
-			},
-		},
-		{
-			name: "Test Mixed Environment and With Timeout",
-			env: map[string]string{
-				"OTEL_EXPORTER_OTLP_TIMEOUT":        "15000",
-				"OTEL_EXPORTER_OTLP_TRACES_TIMEOUT": "27000",
-			},
-			opts: []GenericOption{
-				WithTimeout(5 * time.Second),
-			},
-			asserts: func(t *testing.T, c *Config, grpcOption bool) {
-				assert.Equal(t, c.Traces.Timeout, 5*time.Second)
-			},
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			origEOR := DefaultEnvOptionsReader
-			DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
-				GetEnv:    tt.env.getEnv,
-				ReadFile:  tt.fileReader.readFile,
-				Namespace: "OTEL_EXPORTER_OTLP",
-			}
-			t.Cleanup(func() { DefaultEnvOptionsReader = origEOR })
-
-			// Tests Generic options as HTTP Options
-			cfg := NewHTTPConfig(asHTTPOptions(tt.opts)...)
-			tt.asserts(t, &cfg, false)
-
-			// Tests Generic options as gRPC Options
-			cfg = NewGRPCConfig(asGRPCOptions(tt.opts)...)
-			tt.asserts(t, &cfg, true)
-		})
-	}
-}
-
-func asHTTPOptions(opts []GenericOption) []HTTPOption {
-	converted := make([]HTTPOption, len(opts))
-	for i, o := range opts {
-		converted[i] = NewHTTPOption(o.ApplyHTTPOption)
-	}
-	return converted
-}
-
-func asGRPCOptions(opts []GenericOption) []GRPCOption {
-	converted := make([]GRPCOption, len(opts))
-	for i, o := range opts {
-		converted[i] = NewGRPCOption(o.ApplyGRPCOption)
-	}
-	return converted
-}
-
-func TestCleanPath(t *testing.T) {
-	type args struct {
-		urlPath     string
-		defaultPath string
-	}
-	tests := []struct {
-		name string
-		args args
-		want string
-	}{
-		{
-			name: "clean empty path",
-			args: args{
-				urlPath:     "",
-				defaultPath: "DefaultPath",
-			},
-			want: "DefaultPath",
-		},
-		{
-			name: "clean metrics path",
-			args: args{
-				urlPath:     "/prefix/v1/metrics",
-				defaultPath: "DefaultMetricsPath",
-			},
-			want: "/prefix/v1/metrics",
-		},
-		{
-			name: "clean traces path",
-			args: args{
-				urlPath:     "https://env_endpoint",
-				defaultPath: "DefaultTracesPath",
-			},
-			want: "/https:/env_endpoint",
-		},
-		{
-			name: "spaces trimmed",
-			args: args{
-				urlPath: " /dir",
-			},
-			want: "/dir",
-		},
-		{
-			name: "clean path empty",
-			args: args{
-				urlPath:     "dir/..",
-				defaultPath: "DefaultTracesPath",
-			},
-			want: "DefaultTracesPath",
-		},
-		{
-			name: "make absolute",
-			args: args{
-				urlPath: "dir/a",
-			},
-			want: "/dir/a",
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			if got := cleanPath(tt.args.urlPath, tt.args.defaultPath); got != tt.want {
-				t.Errorf("CleanPath() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-}
diff --git a/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go b/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go
deleted file mode 100644
index c2d6c036152..00000000000
--- a/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
-
-const (
-	// DefaultCollectorGRPCPort is the default gRPC port of the collector.
-	DefaultCollectorGRPCPort uint16 = 4317
-	// DefaultCollectorHTTPPort is the default HTTP port of the collector.
-	DefaultCollectorHTTPPort uint16 = 4318
-	// DefaultCollectorHost is the host address the Exporter will attempt
-	// connect to if no collector address is provided.
-	DefaultCollectorHost string = "localhost"
-)
-
-// Compression describes the compression used for payloads sent to the
-// collector.
-type Compression int
-
-const (
-	// NoCompression tells the driver to send payloads without
-	// compression.
-	NoCompression Compression = iota
-	// GzipCompression tells the driver to send payloads after
-	// compressing them with gzip.
-	GzipCompression
-)
-
-// Marshaler describes the kind of message format sent to the collector.
-type Marshaler int
-
-const (
-	// MarshalProto tells the driver to send using the protobuf binary format.
-	MarshalProto Marshaler = iota
-	// MarshalJSON tells the driver to send using json format.
-	MarshalJSON
-)
diff --git a/exporters/otlp/otlptrace/internal/otlpconfig/tls.go b/exporters/otlp/otlptrace/internal/otlpconfig/tls.go
deleted file mode 100644
index 7287cf6cfeb..00000000000
--- a/exporters/otlp/otlptrace/internal/otlpconfig/tls.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig"
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"errors"
-)
-
-// CreateTLSConfig creates a tls.Config from a raw certificate bytes
-// to verify a server certificate.
-func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
-	cp := x509.NewCertPool()
-	if ok := cp.AppendCertsFromPEM(certBytes); !ok {
-		return nil, errors.New("failed to append certificate to the cert pool")
-	}
-
-	return &tls.Config{
-		RootCAs: cp,
-	}, nil
-}
diff --git a/exporters/otlp/otlptrace/internal/otlptracetest/client.go b/exporters/otlp/otlptrace/internal/otlptracetest/client.go
deleted file mode 100644
index aedb8f4a9d2..00000000000
--- a/exporters/otlp/otlptrace/internal/otlptracetest/client.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otlptracetest // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest"
-
-import (
-	"context"
-	"errors"
-	"sync"
-	"testing"
-	"time"
-
-	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
-)
-
-func RunExporterShutdownTest(t *testing.T, factory func() otlptrace.Client) {
-	t.Run("testClientStopHonorsTimeout", func(t *testing.T) {
-		testClientStopHonorsTimeout(t, factory())
-	})
-
-	t.Run("testClientStopHonorsCancel", func(t *testing.T) {
-		testClientStopHonorsCancel(t, factory())
-	})
-
-	t.Run("testClientStopNoError", func(t *testing.T) {
-		testClientStopNoError(t, factory())
-	})
-
-	t.Run("testClientStopManyTimes", func(t *testing.T) {
-		testClientStopManyTimes(t, factory())
-	})
-}
-
-func initializeExporter(t *testing.T, client otlptrace.Client) *otlptrace.Exporter {
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
-	defer cancel()
-
-	e, err := otlptrace.New(ctx, client)
-	if err != nil {
-		t.Fatalf("failed to create exporter")
-	}
-
-	return e
-}
-
-func testClientStopHonorsTimeout(t *testing.T, client otlptrace.Client) {
-	t.Cleanup(func() {
-		// The test is looking for a failed shut down. Call Stop a second time
-		// with an un-expired context to give the client a second chance at
-		// cleaning up. There is not guarantee from the Client interface this
-		// will succeed, therefore, no need to check the error (just give it a
-		// best try).
-		_ = client.Stop(context.Background())
-	})
-	e := initializeExporter(t, client)
-
-	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
-	defer cancel()
-	<-ctx.Done()
-
-	if err := e.Shutdown(ctx); !errors.Is(err, context.DeadlineExceeded) {
-		t.Errorf("expected context DeadlineExceeded error, got %v", err)
-	}
-}
-
-func testClientStopHonorsCancel(t *testing.T, client otlptrace.Client) {
-	t.Cleanup(func() {
-		// The test is looking for a failed shut down. Call Stop a second time
-		// with an un-expired context to give the client a second chance at
-		// cleaning up. There is not guarantee from the Client interface this
-		// will succeed, therefore, no need to check the error (just give it a
-		// best try).
-		_ = client.Stop(context.Background())
-	})
-	e := initializeExporter(t, client)
-
-	ctx, cancel := context.WithCancel(context.Background())
-	cancel()
-
-	if err := e.Shutdown(ctx); !errors.Is(err, context.Canceled) {
-		t.Errorf("expected context canceled error, got %v", err)
-	}
-}
-
-func testClientStopNoError(t *testing.T, client otlptrace.Client) {
-	e := initializeExporter(t, client)
-
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
-	defer cancel()
-
-	if err := e.Shutdown(ctx); err != nil {
-		t.Errorf("shutdown errored: expected nil, got %v", err)
-	}
-}
-
-func testClientStopManyTimes(t *testing.T, client otlptrace.Client) {
-	e := initializeExporter(t, client)
-
-	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
-	defer cancel()
-
-	ch := make(chan struct{})
-	wg := sync.WaitGroup{}
-	const num int = 20
-	wg.Add(num)
-	errs := make([]error, num)
-	for i := 0; i < num; i++ {
-		go func(idx int) {
-			defer wg.Done()
-			<-ch
-			errs[idx] = e.Shutdown(ctx)
-		}(i)
-	}
-	close(ch)
-	wg.Wait()
-	for _, err := range errs {
-		if err != nil {
-			t.Errorf("failed to shutdown exporter: %v", err)
-			return
-		}
-	}
-}
diff --git a/exporters/otlp/otlptrace/internal/otlptracetest/collector.go b/exporters/otlp/otlptrace/internal/otlptracetest/collector.go
deleted file mode 100644
index 865fabba27d..00000000000
--- a/exporters/otlp/otlptrace/internal/otlptracetest/collector.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otlptracetest // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest"
-
-import (
-	"sort"
-
-	collectortracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
-	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
-	resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
-	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
-)
-
-// TracesCollector mocks a collector for the end-to-end testing.
-type TracesCollector interface {
-	Stop() error
-	GetResourceSpans() []*tracepb.ResourceSpans
-}
-
-// SpansStorage stores the spans. Mock collectors can use it to
-// store spans they have received.
-type SpansStorage struct {
-	rsm       map[string]*tracepb.ResourceSpans
-	spanCount int
-}
-
-// NewSpansStorage creates a new spans storage.
-func NewSpansStorage() SpansStorage {
-	return SpansStorage{
-		rsm: make(map[string]*tracepb.ResourceSpans),
-	}
-}
-
-// AddSpans adds spans to the spans storage.
-func (s *SpansStorage) AddSpans(request *collectortracepb.ExportTraceServiceRequest) {
-	for _, rs := range request.GetResourceSpans() {
-		rstr := resourceString(rs.Resource)
-		if existingRs, ok := s.rsm[rstr]; !ok {
-			s.rsm[rstr] = rs
-			// TODO (rghetia): Add support for library Info.
-			if len(rs.ScopeSpans) == 0 {
-				rs.ScopeSpans = []*tracepb.ScopeSpans{
-					{
-						Spans: []*tracepb.Span{},
-					},
-				}
-			}
-			s.spanCount += len(rs.ScopeSpans[0].Spans)
-		} else {
-			if len(rs.ScopeSpans) > 0 {
-				newSpans := rs.ScopeSpans[0].GetSpans()
-				existingRs.ScopeSpans[0].Spans = append(existingRs.ScopeSpans[0].Spans, newSpans...)
-				s.spanCount += len(newSpans)
-			}
-		}
-	}
-}
-
-// GetSpans returns the stored spans.
-func (s *SpansStorage) GetSpans() []*tracepb.Span {
-	spans := make([]*tracepb.Span, 0, s.spanCount)
-	for _, rs := range s.rsm {
-		spans = append(spans, rs.ScopeSpans[0].Spans...)
-	}
-	return spans
-}
-
-// GetResourceSpans returns the stored resource spans.
-func (s *SpansStorage) GetResourceSpans() []*tracepb.ResourceSpans {
-	rss := make([]*tracepb.ResourceSpans, 0, len(s.rsm))
-	for _, rs := range s.rsm {
-		rss = append(rss, rs)
-	}
-	return rss
-}
-
-func resourceString(res *resourcepb.Resource) string {
-	sAttrs := sortedAttributes(res.GetAttributes())
-	rstr := ""
-	for _, attr := range sAttrs {
-		rstr = rstr + attr.String()
-	}
-	return rstr
-}
-
-func sortedAttributes(attrs []*commonpb.KeyValue) []*commonpb.KeyValue {
-	sort.Slice(attrs[:], func(i, j int) bool {
-		return attrs[i].Key < attrs[j].Key
-	})
-	return attrs
-}
diff --git a/exporters/otlp/otlptrace/internal/otlptracetest/data.go b/exporters/otlp/otlptrace/internal/otlptracetest/data.go
deleted file mode 100644
index d039105cb29..00000000000
--- a/exporters/otlp/otlptrace/internal/otlptracetest/data.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otlptracetest // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest"
-
-import (
-	"time"
-
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/codes"
-	"go.opentelemetry.io/otel/sdk/instrumentation"
-	"go.opentelemetry.io/otel/sdk/resource"
-	tracesdk "go.opentelemetry.io/otel/sdk/trace"
-	"go.opentelemetry.io/otel/sdk/trace/tracetest"
-	"go.opentelemetry.io/otel/trace"
-)
-
-// SingleReadOnlySpan returns a one-element slice with a read-only span. It
-// may be useful for testing driver's trace export.
-func SingleReadOnlySpan() []tracesdk.ReadOnlySpan {
-	return tracetest.SpanStubs{
-		{
-			SpanContext: trace.NewSpanContext(trace.SpanContextConfig{
-				TraceID:    trace.TraceID{2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9},
-				SpanID:     trace.SpanID{3, 4, 5, 6, 7, 8, 9, 0},
-				TraceFlags: trace.FlagsSampled,
-			}),
-			Parent: trace.NewSpanContext(trace.SpanContextConfig{
-				TraceID:    trace.TraceID{2, 3, 4, 5, 6, 7, 8, 9, 2, 3, 4, 5, 6, 7, 8, 9},
-				SpanID:     trace.SpanID{1, 2, 3, 4, 5, 6, 7, 8},
-				TraceFlags: trace.FlagsSampled,
-			}),
-			SpanKind:          trace.SpanKindInternal,
-			Name:              "foo",
-			StartTime:         time.Date(2020, time.December, 8, 20, 23, 0, 0, time.UTC),
-			EndTime:           time.Date(2020, time.December, 0, 20, 24, 0, 0, time.UTC),
-			Attributes:        []attribute.KeyValue{},
-			Events:            []tracesdk.Event{},
-			Links:             []tracesdk.Link{},
-			Status:            tracesdk.Status{Code: codes.Ok},
-			DroppedAttributes: 0,
-			DroppedEvents:     0,
-			DroppedLinks:      0,
-			ChildSpanCount:    0,
-			Resource:          resource.NewSchemaless(attribute.String("a", "b")),
-			InstrumentationLibrary: instrumentation.Library{
-				Name:    "bar",
-				Version: "0.0.0",
-			},
-		},
-	}.Snapshots()
-}
diff --git a/exporters/otlp/otlptrace/internal/otlptracetest/doc.go b/exporters/otlp/otlptrace/internal/otlptracetest/doc.go
deleted file mode 100644
index 16306de6951..00000000000
--- a/exporters/otlp/otlptrace/internal/otlptracetest/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package otlptracetest contains common functionality for testing all OTLP
-// trace exporters.
-//
-// Deprecated: package otlptracetest exists for historical compatibility, it
-// should not be used.
-package otlptracetest // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest"
diff --git a/exporters/otlp/otlptrace/internal/otlptracetest/otlptest.go b/exporters/otlp/otlptrace/internal/otlptracetest/otlptest.go
deleted file mode 100644
index 91c098d1539..00000000000
--- a/exporters/otlp/otlptrace/internal/otlptracetest/otlptest.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otlptracetest // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest"
-
-import (
-	"context"
-	"testing"
-	"time"
-
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
-	"go.opentelemetry.io/otel/sdk/resource"
-	sdktrace "go.opentelemetry.io/otel/sdk/trace"
-	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
-)
-
-// RunEndToEndTest can be used by otlptrace.Client tests to validate
-// themselves.
-func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlptrace.Exporter, tracesCollector TracesCollector) {
-	pOpts := []sdktrace.TracerProviderOption{
-		sdktrace.WithSampler(sdktrace.AlwaysSample()),
-		sdktrace.WithBatcher(
-			exp,
-			// add following two options to ensure flush
-			sdktrace.WithBatchTimeout(5*time.Second),
-			sdktrace.WithMaxExportBatchSize(10),
-		),
-	}
-	tp1 := sdktrace.NewTracerProvider(append(pOpts,
-		sdktrace.WithResource(resource.NewSchemaless(
-			attribute.String("rk1", "rv11)"),
-			attribute.Int64("rk2", 5),
-		)))...)
-
-	tp2 := sdktrace.NewTracerProvider(append(pOpts,
-		sdktrace.WithResource(resource.NewSchemaless(
-			attribute.String("rk1", "rv12)"),
-			attribute.Float64("rk3", 6.5),
-		)))...)
-
-	tr1 := tp1.Tracer("test-tracer1")
-	tr2 := tp2.Tracer("test-tracer2")
-	// Now create few spans
-	m := 4
-	for i := 0; i < m; i++ {
-		_, span := tr1.Start(ctx, "AlwaysSample")
-		span.SetAttributes(attribute.Int64("i", int64(i)))
-		span.End()
-
-		_, span = tr2.Start(ctx, "AlwaysSample")
-		span.SetAttributes(attribute.Int64("i", int64(i)))
-		span.End()
-	}
-
-	func() {
-		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
-		defer cancel()
-		if err := tp1.Shutdown(ctx); err != nil {
-			t.Fatalf("failed to shut down a tracer provider 1: %v", err)
-		}
-		if err := tp2.Shutdown(ctx); err != nil {
-			t.Fatalf("failed to shut down a tracer provider 2: %v", err)
-		}
-	}()
-
-	// Wait >2 cycles.
-	<-time.After(40 * time.Millisecond)
-
-	// Now shutdown the exporter
-	ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
-	defer cancel()
-	if err := exp.Shutdown(ctx); err != nil {
-		t.Fatalf("failed to stop the exporter: %v", err)
-	}
-
-	// Shutdown the collector too so that we can begin
-	// verification checks of expected data back.
-	if err := tracesCollector.Stop(); err != nil {
-		t.Fatalf("failed to stop the mock collector: %v", err)
-	}
-
-	// Now verify that we only got two resources
-	rss := tracesCollector.GetResourceSpans()
-	if got, want := len(rss), 2; got != want {
-		t.Fatalf("resource span count: got %d, want %d\n", got, want)
-	}
-
-	// Now verify spans and attributes for each resource span.
-	for _, rs := range rss {
-		if len(rs.ScopeSpans) == 0 {
-			t.Fatalf("zero ScopeSpans")
-		}
-		if got, want := len(rs.ScopeSpans[0].Spans), m; got != want {
-			t.Fatalf("span counts: got %d, want %d", got, want)
-		}
-		attrMap := map[int64]bool{}
-		for _, s := range rs.ScopeSpans[0].Spans {
-			if gotName, want := s.Name, "AlwaysSample"; gotName != want {
-				t.Fatalf("span name: got %s, want %s", gotName, want)
-			}
-			attrMap[s.Attributes[0].Value.Value.(*commonpb.AnyValue_IntValue).IntValue] = true
-		}
-		if got, want := len(attrMap), m; got != want {
-			t.Fatalf("span attribute unique values: got %d  want %d", got, want)
-		}
-		for i := 0; i < m; i++ {
-			_, ok := attrMap[int64(i)]
-			if !ok {
-				t.Fatalf("span with attribute %d missing", i)
-			}
-		}
-	}
-}
diff --git a/exporters/otlp/otlptrace/internal/retry/retry.go b/exporters/otlp/otlptrace/internal/retry/retry.go
deleted file mode 100644
index 1af94419199..00000000000
--- a/exporters/otlp/otlptrace/internal/retry/retry.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package retry provides request retry functionality that can perform
-// configurable exponential backoff for transient errors and honor any
-// explicit throttle responses received.
-//
-// Deprecated: package retry exists for historical compatibility, it should not
-// be used.
-package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry"
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	"github.com/cenkalti/backoff/v4"
-)
-
-// DefaultConfig are the recommended defaults to use.
-var DefaultConfig = Config{
-	Enabled:         true,
-	InitialInterval: 5 * time.Second,
-	MaxInterval:     30 * time.Second,
-	MaxElapsedTime:  time.Minute,
-}
-
-// Config defines configuration for retrying batches in case of export failure
-// using an exponential backoff.
-type Config struct {
-	// Enabled indicates whether to not retry sending batches in case of
-	// export failure.
-	Enabled bool
-	// InitialInterval the time to wait after the first failure before
-	// retrying.
-	InitialInterval time.Duration
-	// MaxInterval is the upper bound on backoff interval. Once this value is
-	// reached the delay between consecutive retries will always be
-	// `MaxInterval`.
-	MaxInterval time.Duration
-	// MaxElapsedTime is the maximum amount of time (including retries) spent
-	// trying to send a request/batch.  Once this value is reached, the data
-	// is discarded.
-	MaxElapsedTime time.Duration
-}
-
-// RequestFunc wraps a request with retry logic.
-type RequestFunc func(context.Context, func(context.Context) error) error
-
-// EvaluateFunc returns if an error is retry-able and if an explicit throttle
-// duration should be honored that was included in the error.
-//
-// The function must return true if the error argument is retry-able,
-// otherwise it must return false for the first return parameter.
-//
-// The function must return a non-zero time.Duration if the error contains
-// explicit throttle duration that should be honored, otherwise it must return
-// a zero valued time.Duration.
-type EvaluateFunc func(error) (bool, time.Duration)
-
-// RequestFunc returns a RequestFunc using the evaluate function to determine
-// if requests can be retried and based on the exponential backoff
-// configuration of c.
-func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
-	if !c.Enabled {
-		return func(ctx context.Context, fn func(context.Context) error) error {
-			return fn(ctx)
-		}
-	}
-
-	return func(ctx context.Context, fn func(context.Context) error) error {
-		// Do not use NewExponentialBackOff since it calls Reset and the code here
-		// must call Reset after changing the InitialInterval (this saves an
-		// unnecessary call to Now).
-		b := &backoff.ExponentialBackOff{
-			InitialInterval:     c.InitialInterval,
-			RandomizationFactor: backoff.DefaultRandomizationFactor,
-			Multiplier:          backoff.DefaultMultiplier,
-			MaxInterval:         c.MaxInterval,
-			MaxElapsedTime:      c.MaxElapsedTime,
-			Stop:                backoff.Stop,
-			Clock:               backoff.SystemClock,
-		}
-		b.Reset()
-
-		for {
-			err := fn(ctx)
-			if err == nil {
-				return nil
-			}
-
-			retryable, throttle := evaluate(err)
-			if !retryable {
-				return err
-			}
-
-			bOff := b.NextBackOff()
-			if bOff == backoff.Stop {
-				return fmt.Errorf("max retry time elapsed: %w", err)
-			}
-
-			// Wait for the greater of the backoff or throttle delay.
-			var delay time.Duration
-			if bOff > throttle {
-				delay = bOff
-			} else {
-				elapsed := b.GetElapsedTime()
-				if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
-					return fmt.Errorf("max retry time would elapse: %w", err)
-				}
-				delay = throttle
-			}
-
-			if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
-				return fmt.Errorf("%w: %s", ctxErr, err)
-			}
-		}
-	}
-}
-
-// Allow override for testing.
-var waitFunc = wait
-
-// wait takes the caller's context, and the amount of time to wait.  It will
-// return nil if the timer fires before or at the same time as the context's
-// deadline.  This indicates that the call can be retried.
-func wait(ctx context.Context, delay time.Duration) error {
-	timer := time.NewTimer(delay)
-	defer timer.Stop()
-
-	select {
-	case <-ctx.Done():
-		// Handle the case where the timer and context deadline end
-		// simultaneously by prioritizing the timer expiration nil value
-		// response.
-		select {
-		case <-timer.C:
-		default:
-			return ctx.Err()
-		}
-	case <-timer.C:
-	}
-
-	return nil
-}
diff --git a/exporters/otlp/otlptrace/internal/retry/retry_test.go b/exporters/otlp/otlptrace/internal/retry/retry_test.go
deleted file mode 100644
index de574a73579..00000000000
--- a/exporters/otlp/otlptrace/internal/retry/retry_test.go
+++ /dev/null
@@ -1,258 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package retry
-
-import (
-	"context"
-	"errors"
-	"math"
-	"sync"
-	"testing"
-	"time"
-
-	"github.com/cenkalti/backoff/v4"
-	"github.com/stretchr/testify/assert"
-)
-
-func TestWait(t *testing.T) {
-	tests := []struct {
-		ctx      context.Context
-		delay    time.Duration
-		expected error
-	}{
-		{
-			ctx:   context.Background(),
-			delay: time.Duration(0),
-		},
-		{
-			ctx:   context.Background(),
-			delay: time.Duration(1),
-		},
-		{
-			ctx:   context.Background(),
-			delay: time.Duration(-1),
-		},
-		{
-			ctx: func() context.Context {
-				ctx, cancel := context.WithCancel(context.Background())
-				cancel()
-				return ctx
-			}(),
-			// Ensure the timer and context do not end simultaneously.
-			delay:    1 * time.Hour,
-			expected: context.Canceled,
-		},
-	}
-
-	for _, test := range tests {
-		err := wait(test.ctx, test.delay)
-		if test.expected == nil {
-			assert.NoError(t, err)
-		} else {
-			assert.ErrorIs(t, err, test.expected)
-		}
-	}
-}
-
-func TestNonRetryableError(t *testing.T) {
-	ev := func(error) (bool, time.Duration) { return false, 0 }
-
-	reqFunc := Config{
-		Enabled:         true,
-		InitialInterval: 1 * time.Nanosecond,
-		MaxInterval:     1 * time.Nanosecond,
-		// Never stop retrying.
-		MaxElapsedTime: 0,
-	}.RequestFunc(ev)
-	ctx := context.Background()
-	assert.NoError(t, reqFunc(ctx, func(context.Context) error {
-		return nil
-	}))
-	assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error {
-		return assert.AnError
-	}), assert.AnError)
-}
-
-func TestThrottledRetry(t *testing.T) {
-	// Ensure the throttle delay is used by making longer than backoff delay.
-	throttleDelay, backoffDelay := time.Second, time.Nanosecond
-
-	ev := func(error) (bool, time.Duration) {
-		// Retry everything with a throttle delay.
-		return true, throttleDelay
-	}
-
-	reqFunc := Config{
-		Enabled:         true,
-		InitialInterval: backoffDelay,
-		MaxInterval:     backoffDelay,
-		// Never stop retrying.
-		MaxElapsedTime: 0,
-	}.RequestFunc(ev)
-
-	origWait := waitFunc
-	var done bool
-	waitFunc = func(_ context.Context, delay time.Duration) error {
-		assert.Equal(t, throttleDelay, delay, "retry not throttled")
-		// Try twice to ensure call is attempted again after delay.
-		if done {
-			return assert.AnError
-		}
-		done = true
-		return nil
-	}
-	defer func() { waitFunc = origWait }()
-
-	ctx := context.Background()
-	assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error {
-		return errors.New("not this error")
-	}), assert.AnError)
-}
-
-func TestBackoffRetry(t *testing.T) {
-	ev := func(error) (bool, time.Duration) { return true, 0 }
-
-	delay := time.Nanosecond
-	reqFunc := Config{
-		Enabled:         true,
-		InitialInterval: delay,
-		MaxInterval:     delay,
-		// Never stop retrying.
-		MaxElapsedTime: 0,
-	}.RequestFunc(ev)
-
-	origWait := waitFunc
-	var done bool
-	waitFunc = func(_ context.Context, d time.Duration) error {
-		delta := math.Ceil(float64(delay) * backoff.DefaultRandomizationFactor)
-		assert.InDelta(t, delay, d, delta, "retry not backoffed")
-		// Try twice to ensure call is attempted again after delay.
-		if done {
-			return assert.AnError
-		}
-		done = true
-		return nil
-	}
-	t.Cleanup(func() { waitFunc = origWait })
-
-	ctx := context.Background()
-	assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error {
-		return errors.New("not this error")
-	}), assert.AnError)
-}
-
-func TestBackoffRetryCanceledContext(t *testing.T) {
-	ev := func(error) (bool, time.Duration) { return true, 0 }
-
-	delay := time.Millisecond
-	reqFunc := Config{
-		Enabled:         true,
-		InitialInterval: delay,
-		MaxInterval:     delay,
-		// Never stop retrying.
-		MaxElapsedTime: 10 * time.Millisecond,
-	}.RequestFunc(ev)
-
-	ctx, cancel := context.WithCancel(context.Background())
-	count := 0
-	cancel()
-	err := reqFunc(ctx, func(context.Context) error {
-		count++
-		return assert.AnError
-	})
-
-	assert.ErrorIs(t, err, context.Canceled)
-	assert.Contains(t, err.Error(), assert.AnError.Error())
-	assert.Equal(t, 1, count)
-}
-
-func TestThrottledRetryGreaterThanMaxElapsedTime(t *testing.T) {
-	// Ensure the throttle delay is used by making longer than backoff delay.
-	tDelay, bDelay := time.Hour, time.Nanosecond
-	ev := func(error) (bool, time.Duration) { return true, tDelay }
-	reqFunc := Config{
-		Enabled:         true,
-		InitialInterval: bDelay,
-		MaxInterval:     bDelay,
-		MaxElapsedTime:  tDelay - (time.Nanosecond),
-	}.RequestFunc(ev)
-
-	ctx := context.Background()
-	assert.Contains(t, reqFunc(ctx, func(context.Context) error {
-		return assert.AnError
-	}).Error(), "max retry time would elapse: ")
-}
-
-func TestMaxElapsedTime(t *testing.T) {
-	ev := func(error) (bool, time.Duration) { return true, 0 }
-	delay := time.Nanosecond
-	reqFunc := Config{
-		Enabled: true,
-		// InitialInterval > MaxElapsedTime means immediate return.
-		InitialInterval: 2 * delay,
-		MaxElapsedTime:  delay,
-	}.RequestFunc(ev)
-
-	ctx := context.Background()
-	assert.Contains(t, reqFunc(ctx, func(context.Context) error {
-		return assert.AnError
-	}).Error(), "max retry time elapsed: ")
-}
-
-func TestRetryNotEnabled(t *testing.T) {
-	ev := func(error) (bool, time.Duration) {
-		t.Error("evaluated retry when not enabled")
-		return false, 0
-	}
-
-	reqFunc := Config{}.RequestFunc(ev)
-	ctx := context.Background()
-	assert.NoError(t, reqFunc(ctx, func(context.Context) error {
-		return nil
-	}))
-	assert.ErrorIs(t, reqFunc(ctx, func(context.Context) error {
-		return assert.AnError
-	}), assert.AnError)
-}
-
-func TestRetryConcurrentSafe(t *testing.T) {
-	ev := func(error) (bool, time.Duration) { return true, 0 }
-	reqFunc := Config{
-		Enabled: true,
-	}.RequestFunc(ev)
-
-	var wg sync.WaitGroup
-	ctx := context.Background()
-
-	for i := 1; i < 5; i++ {
-		wg.Add(1)
-
-		go func() {
-			defer wg.Done()
-
-			var done bool
-			assert.NoError(t, reqFunc(ctx, func(context.Context) error {
-				if !done {
-					done = true
-					return assert.AnError
-				}
-
-				return nil
-			}))
-		}()
-	}
-
-	wg.Wait()
-}
diff --git a/exporters/otlp/otlptrace/internal/tracetransform/span_test.go b/exporters/otlp/otlptrace/internal/tracetransform/span_test.go
index 7e0be6cabb7..7a4fa1e9185 100644
--- a/exporters/otlp/otlptrace/internal/tracetransform/span_test.go
+++ b/exporters/otlp/otlptrace/internal/tracetransform/span_test.go
@@ -213,12 +213,14 @@ func TestSpanData(t *testing.T) {
 		StartTime: startTime,
 		EndTime:   endTime,
 		Events: []tracesdk.Event{
-			{Time: startTime,
+			{
+				Time: startTime,
 				Attributes: []attribute.KeyValue{
 					attribute.Int64("CompressedByteSize", 512),
 				},
 			},
-			{Time: endTime,
+			{
+				Time: endTime,
 				Attributes: []attribute.KeyValue{
 					attribute.String("EventType", "Recv"),
 				},
diff --git a/exporters/otlp/otlptrace/otlptracegrpc/client.go b/exporters/otlp/otlptrace/otlptracegrpc/client.go
index 86fb61a0dec..b4cc21d7a3c 100644
--- a/exporters/otlp/otlptrace/otlptracegrpc/client.go
+++ b/exporters/otlp/otlptrace/otlptracegrpc/client.go
@@ -260,30 +260,38 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
 // duration to wait for if an explicit throttle time is included in err.
 func retryable(err error) (bool, time.Duration) {
 	s := status.Convert(err)
+	return retryableGRPCStatus(s)
+}
+
+func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
 	switch s.Code() {
 	case codes.Canceled,
 		codes.DeadlineExceeded,
-		codes.ResourceExhausted,
 		codes.Aborted,
 		codes.OutOfRange,
 		codes.Unavailable,
 		codes.DataLoss:
-		return true, throttleDelay(s)
+		// Additionally handle RetryInfo.
+		_, d := throttleDelay(s)
+		return true, d
+	case codes.ResourceExhausted:
+		// Retry only if the server signals that the recovery from resource exhaustion is possible.
+		return throttleDelay(s)
 	}
 
 	// Not a retry-able error.
 	return false, 0
 }
 
-// throttleDelay returns a duration to wait for if an explicit throttle time
-// is included in the response status.
-func throttleDelay(s *status.Status) time.Duration {
+// throttleDelay reports whether the status includes RetryInfo
+// and, if so, the duration to wait before retrying.
+func throttleDelay(s *status.Status) (bool, time.Duration) {
 	for _, detail := range s.Details() {
 		if t, ok := detail.(*errdetails.RetryInfo); ok {
-			return t.RetryDelay.AsDuration()
+			return true, t.RetryDelay.AsDuration()
 		}
 	}
-	return 0
+	return false, 0
 }
 
 // MarshalLog is the marshaling function used by the logging system to represent this Client.
diff --git a/exporters/otlp/otlptrace/otlptracegrpc/client_test.go b/exporters/otlp/otlptrace/otlptracegrpc/client_test.go
index b25606d232d..aa531eb926e 100644
--- a/exporters/otlp/otlptrace/otlptracegrpc/client_test.go
+++ b/exporters/otlp/otlptrace/otlptracegrpc/client_test.go
@@ -188,7 +188,7 @@ func TestNewCollectorOnBadConnection(t *testing.T) {
 	endpoint := fmt.Sprintf("localhost:%s", collectorPortStr)
 	ctx := context.Background()
 	exp := newGRPCExporter(t, ctx, endpoint)
-	_ = exp.Shutdown(ctx)
+	require.NoError(t, exp.Shutdown(ctx))
 }
 
 func TestNewWithEndpoint(t *testing.T) {
@@ -197,7 +197,7 @@ func TestNewWithEndpoint(t *testing.T) {
 
 	ctx := context.Background()
 	exp := newGRPCExporter(t, ctx, mc.endpoint)
-	_ = exp.Shutdown(ctx)
+	require.NoError(t, exp.Shutdown(ctx))
 }
 
 func TestNewWithHeaders(t *testing.T) {
diff --git a/exporters/otlp/otlptrace/otlptracegrpc/client_unit_test.go b/exporters/otlp/otlptrace/otlptracegrpc/client_unit_test.go
index 5c43df1e322..df27a208daf 100644
--- a/exporters/otlp/otlptrace/otlptracegrpc/client_unit_test.go
+++ b/exporters/otlp/otlptrace/otlptracegrpc/client_unit_test.go
@@ -27,19 +27,21 @@ import (
 	"google.golang.org/protobuf/types/known/durationpb"
 )
 
-func TestThrottleDuration(t *testing.T) {
+func TestThrottleDelay(t *testing.T) {
 	c := codes.ResourceExhausted
 	testcases := []struct {
-		status   *status.Status
-		expected time.Duration
+		status       *status.Status
+		wantOK       bool
+		wantDuration time.Duration
 	}{
 		{
-			status:   status.New(c, "no retry info"),
-			expected: 0,
+			status:       status.New(c, "NoRetryInfo"),
+			wantOK:       false,
+			wantDuration: 0,
 		},
 		{
 			status: func() *status.Status {
-				s, err := status.New(c, "single retry info").WithDetails(
+				s, err := status.New(c, "SingleRetryInfo").WithDetails(
 					&errdetails.RetryInfo{
 						RetryDelay: durationpb.New(15 * time.Millisecond),
 					},
@@ -47,21 +49,23 @@ func TestThrottleDuration(t *testing.T) {
 				require.NoError(t, err)
 				return s
 			}(),
-			expected: 15 * time.Millisecond,
+			wantOK:       true,
+			wantDuration: 15 * time.Millisecond,
 		},
 		{
 			status: func() *status.Status {
-				s, err := status.New(c, "error info").WithDetails(
+				s, err := status.New(c, "ErrorInfo").WithDetails(
 					&errdetails.ErrorInfo{Reason: "no throttle detail"},
 				)
 				require.NoError(t, err)
 				return s
 			}(),
-			expected: 0,
+			wantOK:       false,
+			wantDuration: 0,
 		},
 		{
 			status: func() *status.Status {
-				s, err := status.New(c, "error and retry info").WithDetails(
+				s, err := status.New(c, "ErrorAndRetryInfo").WithDetails(
 					&errdetails.ErrorInfo{Reason: "with throttle detail"},
 					&errdetails.RetryInfo{
 						RetryDelay: durationpb.New(13 * time.Minute),
@@ -70,11 +74,12 @@ func TestThrottleDuration(t *testing.T) {
 				require.NoError(t, err)
 				return s
 			}(),
-			expected: 13 * time.Minute,
+			wantOK:       true,
+			wantDuration: 13 * time.Minute,
 		},
 		{
 			status: func() *status.Status {
-				s, err := status.New(c, "double retry info").WithDetails(
+				s, err := status.New(c, "DoubleRetryInfo").WithDetails(
 					&errdetails.RetryInfo{
 						RetryDelay: durationpb.New(13 * time.Minute),
 					},
@@ -85,13 +90,16 @@ func TestThrottleDuration(t *testing.T) {
 				require.NoError(t, err)
 				return s
 			}(),
-			expected: 13 * time.Minute,
+			wantOK:       true,
+			wantDuration: 13 * time.Minute,
 		},
 	}
 
 	for _, tc := range testcases {
 		t.Run(tc.status.Message(), func(t *testing.T) {
-			require.Equal(t, tc.expected, throttleDelay(tc.status))
+			ok, d := throttleDelay(tc.status)
+			assert.Equal(t, tc.wantOK, ok)
+			assert.Equal(t, tc.wantDuration, d)
 		})
 	}
 }
@@ -106,7 +114,7 @@ func TestRetryable(t *testing.T) {
 		codes.NotFound:           false,
 		codes.AlreadyExists:      false,
 		codes.PermissionDenied:   false,
-		codes.ResourceExhausted:  true,
+		codes.ResourceExhausted:  false,
 		codes.FailedPrecondition: false,
 		codes.Aborted:            true,
 		codes.OutOfRange:         true,
@@ -123,6 +131,20 @@ func TestRetryable(t *testing.T) {
 	}
 }
 
+func TestRetryableGRPCStatusResourceExhaustedWithRetryInfo(t *testing.T) {
+	delay := 15 * time.Millisecond
+	s, err := status.New(codes.ResourceExhausted, "WithRetryInfo").WithDetails(
+		&errdetails.RetryInfo{
+			RetryDelay: durationpb.New(delay),
+		},
+	)
+	require.NoError(t, err)
+
+	ok, d := retryableGRPCStatus(s)
+	assert.True(t, ok)
+	assert.Equal(t, delay, d)
+}
+
 func TestUnstartedStop(t *testing.T) {
 	client := NewClient()
 	assert.ErrorIs(t, client.Stop(context.Background()), errAlreadyStopped)
diff --git a/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/exporters/otlp/otlptrace/otlptracegrpc/doc.go
new file mode 100644
index 00000000000..1f514ef9ea3
--- /dev/null
+++ b/exporters/otlp/otlptrace/otlptracegrpc/doc.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package otlptracegrpc provides an OTLP span exporter using gRPC.
+By default the telemetry is sent to https://localhost:4317.
+
+Exporter should be created using [New].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") -
+target to which the exporter sends telemetry.
+The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
+The value must contain a host.
+The value may additionally contain a port, a scheme, and a path.
+The value accepts "http" and "https" scheme.
+The value should not contain a query string or fragment.
+OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
+The configuration can be overridden by [WithEndpoint], [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") -
+setting "true" disables client transport security for the exporter's gRPC connection.
+You can use this only when an endpoint is provided without the http or https scheme.
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT setting overrides
+the scheme defined via OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT.
+OTEL_EXPORTER_OTLP_TRACES_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
+The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
+key-value pairs used as gRPC metadata associated with gRPC requests.
+The value is expected to be represented in a format matching to the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/chain trust for the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+*/
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
diff --git a/exporters/otlp/internal/wrappederror_test.go b/exporters/otlp/otlptrace/otlptracegrpc/example_test.go
similarity index 52%
rename from exporters/otlp/internal/wrappederror_test.go
rename to exporters/otlp/otlptrace/otlptracegrpc/example_test.go
index 995358a9f8e..ee03d1b505a 100644
--- a/exporters/otlp/internal/wrappederror_test.go
+++ b/exporters/otlp/otlptrace/otlptracegrpc/example_test.go
@@ -12,21 +12,31 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal"
+package otlptracegrpc_test
 
 import (
 	"context"
-	"errors"
-	"testing"
 
-	"github.com/stretchr/testify/require"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+	"go.opentelemetry.io/otel/sdk/trace"
 )
 
-func TestWrappedError(t *testing.T) {
-	e := WrapTracesError(context.Canceled)
+func Example() {
+	ctx := context.Background()
+	exp, err := otlptracegrpc.New(ctx)
+	if err != nil {
+		panic(err)
+	}
 
-	require.Equal(t, context.Canceled, errors.Unwrap(e))
-	require.Equal(t, TracesExport, e.(wrappedExportError).kind)
-	require.Equal(t, "traces export: context canceled", e.Error())
-	require.True(t, errors.Is(e, context.Canceled))
+	tracerProvider := trace.NewTracerProvider(trace.WithBatcher(exp))
+	defer func() {
+		if err := tracerProvider.Shutdown(ctx); err != nil {
+			panic(err)
+		}
+	}()
+	otel.SetTracerProvider(tracerProvider)
+
+	// From here, the tracerProvider can be used by instrumentation to collect
+	// telemetry.
 }
diff --git a/exporters/otlp/otlptrace/otlptracegrpc/go.mod b/exporters/otlp/otlptrace/otlptracegrpc/go.mod
index 8e54df641bc..1b87fd3e494 100644
--- a/exporters/otlp/otlptrace/otlptracegrpc/go.mod
+++ b/exporters/otlp/otlptrace/otlptracegrpc/go.mod
@@ -1,33 +1,34 @@
 module go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
 
-go 1.19
+go 1.20
 
 require (
 	github.com/cenkalti/backoff/v4 v4.2.1
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/trace v1.16.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/trace v1.21.0
 	go.opentelemetry.io/proto/otlp v1.0.0
-	go.uber.org/goleak v1.2.1
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc
-	google.golang.org/grpc v1.57.0
+	go.uber.org/goleak v1.3.0
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d
+	google.golang.org/grpc v1.59.0
 	google.golang.org/protobuf v1.31.0
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
+	github.com/kr/text v0.2.0 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	golang.org/x/net v0.10.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
-	golang.org/x/text v0.9.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	golang.org/x/net v0.17.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
+	golang.org/x/text v0.13.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
diff --git a/exporters/otlp/otlptrace/otlptracegrpc/go.sum b/exporters/otlp/otlptrace/otlptracegrpc/go.sum
index 7daf373b691..9d581c2ed07 100644
--- a/exporters/otlp/otlptrace/otlptracegrpc/go.sum
+++ b/exporters/otlp/otlptrace/otlptracegrpc/go.sum
@@ -1,22 +1,24 @@
 github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
 github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
+github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -24,22 +26,22 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
 go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
-go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
-go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
-golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
+go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e h1:Ao9GzfUMPH3zjVfzXG5rlWlk+Q8MXWKwWpwVQE1MXfw=
-google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM=
-google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
-google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
-google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
+google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
+google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
diff --git a/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
index becb1f0fbbe..5530119e4cf 100644
--- a/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
+++ b/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
@@ -174,13 +174,13 @@ func stringToHeader(value string) map[string]string {
 			global.Error(errors.New("missing '="), "parse headers", "input", header)
 			continue
 		}
-		name, err := url.QueryUnescape(n)
+		name, err := url.PathUnescape(n)
 		if err != nil {
 			global.Error(err, "escape header key", "key", n)
 			continue
 		}
 		trimmedName := strings.TrimSpace(name)
-		value, err := url.QueryUnescape(v)
+		value, err := url.PathUnescape(v)
 		if err != nil {
 			global.Error(err, "escape header value", "value", v)
 			continue
diff --git a/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig_test.go b/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig_test.go
index cec506208d5..6cbe0c7ab11 100644
--- a/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig_test.go
+++ b/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig_test.go
@@ -427,7 +427,12 @@ func TestStringToHeader(t *testing.T) {
 			want:  map[string]string{"userId": "alice"},
 		},
 		{
-			name:  "multiples headers encoded",
+			name:  "simple header conforms to RFC 3986 spec",
+			value: " userId = alice+test ",
+			want:  map[string]string{"userId": "alice+test"},
+		},
+		{
+			name:  "multiple headers encoded",
 			value: "userId=alice,serverNode=DF%3A28,isProduction=false",
 			want: map[string]string{
 				"userId":       "alice",
@@ -435,6 +440,16 @@ func TestStringToHeader(t *testing.T) {
 				"isProduction": "false",
 			},
 		},
+		{
+			name:  "multiple headers encoded per RFC 3986 spec",
+			value: "userId=alice+test,serverNode=DF%3A28,isProduction=false,namespace=localhost/test",
+			want: map[string]string{
+				"userId":       "alice+test",
+				"serverNode":   "DF:28",
+				"isProduction": "false",
+				"namespace":    "localhost/test",
+			},
+		},
 		{
 			name:  "invalid headers format",
 			value: "userId:alice",
diff --git a/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
index 19b8434d4d2..dddb1f334de 100644
--- a/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
+++ b/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
@@ -141,9 +141,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
 	if cfg.Traces.Compression == GzipCompression {
 		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
 	}
-	if len(cfg.DialOptions) != 0 {
-		cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
-	}
 	if cfg.ReconnectionPeriod != 0 {
 		p := grpc.ConnectParams{
 			Backoff:           backoff.DefaultConfig,
diff --git a/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options_test.go b/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options_test.go
index e947cdcb86e..567f12f1018 100644
--- a/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options_test.go
+++ b/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options_test.go
@@ -201,7 +201,7 @@ func TestConfigs(t *testing.T) {
 			},
 			asserts: func(t *testing.T, c *Config, grpcOption bool) {
 				if grpcOption {
-					//TODO: make sure gRPC's credentials actually works
+					// TODO: make sure gRPC's credentials actually works
 					assert.NotNil(t, c.Traces.GRPCCredentials)
 				} else {
 					// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
diff --git a/exporters/otlp/otlptrace/otlptracegrpc/options.go b/exporters/otlp/otlptrace/otlptracegrpc/options.go
index 78ce9ad8f0b..17ffeaf6ef0 100644
--- a/exporters/otlp/otlptrace/otlptracegrpc/options.go
+++ b/exporters/otlp/otlptrace/otlptracegrpc/options.go
@@ -93,13 +93,7 @@ func compressorToCompression(compressor string) otlpconfig.Compression {
 }
 
 // WithCompressor sets the compressor for the gRPC client to use when sending
-// requests. It is the responsibility of the caller to ensure that the
-// compressor set has been registered with google.golang.org/grpc/encoding.
-// This can be done by encoding.RegisterCompressor. Some compressors
-// auto-register on import, such as gzip, which can be registered by calling
-// `import _ "google.golang.org/grpc/encoding/gzip"`.
-//
-// This option has no effect if WithGRPCConn is used.
+// requests. Supported compressor values: "gzip".
 func WithCompressor(compressor string) Option {
 	return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
 }
diff --git a/exporters/otlp/otlptrace/otlptracehttp/client.go b/exporters/otlp/otlptrace/otlptracehttp/client.go
index 3a3cfec0cde..3b5f3839f27 100644
--- a/exporters/otlp/otlptrace/otlptracehttp/client.go
+++ b/exporters/otlp/otlptrace/otlptracehttp/client.go
@@ -18,6 +18,7 @@ import (
 	"bytes"
 	"compress/gzip"
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"net"
@@ -152,6 +153,10 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc
 
 		request.reset(ctx)
 		resp, err := d.client.Do(request.Request)
+		var urlErr *url.Error
+		if errors.As(err, &urlErr) && urlErr.Temporary() {
+			return newResponseError(http.Header{})
+		}
 		if err != nil {
 			return err
 		}
@@ -172,8 +177,11 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc
 			if _, err := io.Copy(&respData, resp.Body); err != nil {
 				return err
 			}
+			if respData.Len() == 0 {
+				return nil
+			}
 
-			if respData.Len() != 0 {
+			if resp.Header.Get("Content-Type") == "application/x-protobuf" {
 				var respProto coltracepb.ExportTraceServiceResponse
 				if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil {
 					return err
@@ -190,7 +198,10 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc
 			}
 			return nil
 
-		case sc == http.StatusTooManyRequests, sc == http.StatusServiceUnavailable:
+		case sc == http.StatusTooManyRequests,
+			sc == http.StatusBadGateway,
+			sc == http.StatusServiceUnavailable,
+			sc == http.StatusGatewayTimeout:
 			// Retry-able failures.  Drain the body to reuse the connection.
 			if _, err := io.Copy(io.Discard, resp.Body); err != nil {
 				otel.Handle(err)
diff --git a/exporters/otlp/otlptrace/otlptracehttp/client_test.go b/exporters/otlp/otlptrace/otlptracehttp/client_test.go
index 2020b15058b..63a4cd4f207 100644
--- a/exporters/otlp/otlptrace/otlptracehttp/client_test.go
+++ b/exporters/otlp/otlptrace/otlptracehttp/client_test.go
@@ -19,7 +19,6 @@ import (
 	"errors"
 	"fmt"
 	"net/http"
-	"os"
 	"strings"
 	"testing"
 	"time"
@@ -95,7 +94,7 @@ func TestEndToEnd(t *testing.T) {
 				}),
 			},
 			mcCfg: mockCollectorConfig{
-				InjectHTTPStatus: []int{503, 503},
+				InjectHTTPStatus: []int{503, 502},
 			},
 		},
 		{
@@ -110,7 +109,7 @@ func TestEndToEnd(t *testing.T) {
 				}),
 			},
 			mcCfg: mockCollectorConfig{
-				InjectHTTPStatus: []int{503},
+				InjectHTTPStatus: []int{504},
 				InjectResponseHeader: []map[string]string{
 					{"Retry-After": "10"},
 				},
@@ -213,6 +212,7 @@ func TestTimeout(t *testing.T) {
 		otlptracehttp.WithEndpoint(mc.Endpoint()),
 		otlptracehttp.WithInsecure(),
 		otlptracehttp.WithTimeout(time.Nanosecond),
+		otlptracehttp.WithRetry(otlptracehttp.RetryConfig{Enabled: false}),
 	)
 	ctx := context.Background()
 	exporter, err := otlptrace.New(ctx, client)
@@ -221,9 +221,7 @@ func TestTimeout(t *testing.T) {
 		assert.NoError(t, exporter.Shutdown(ctx))
 	}()
 	err = exporter.ExportSpans(ctx, otlptracetest.SingleReadOnlySpan())
-	unwrapped := errors.Unwrap(err)
-	assert.Equalf(t, true, os.IsTimeout(unwrapped), "expected timeout error, got: %v", unwrapped)
-	assert.True(t, strings.HasPrefix(err.Error(), "traces export: "), err)
+	assert.ErrorContains(t, err, "retry-able request failure")
 }
 
 func TestNoRetry(t *testing.T) {
@@ -432,3 +430,24 @@ func TestOtherHTTPSuccess(t *testing.T) {
 		})
 	}
 }
+
+func TestCollectorRespondingNonProtobufContent(t *testing.T) {
+	mcCfg := mockCollectorConfig{
+		InjectContentType: "application/octet-stream",
+	}
+	mc := runMockCollector(t, mcCfg)
+	defer mc.MustStop(t)
+	driver := otlptracehttp.NewClient(
+		otlptracehttp.WithEndpoint(mc.Endpoint()),
+		otlptracehttp.WithInsecure(),
+	)
+	ctx := context.Background()
+	exporter, err := otlptrace.New(ctx, driver)
+	require.NoError(t, err)
+	defer func() {
+		assert.NoError(t, exporter.Shutdown(context.Background()))
+	}()
+	err = exporter.ExportSpans(ctx, otlptracetest.SingleReadOnlySpan())
+	assert.NoError(t, err)
+	assert.Len(t, mc.GetSpans(), 1)
+}
diff --git a/exporters/otlp/otlptrace/otlptracehttp/doc.go b/exporters/otlp/otlptrace/otlptracehttp/doc.go
index e7f066b43ce..854cc38c8e4 100644
--- a/exporters/otlp/otlptrace/otlptracehttp/doc.go
+++ b/exporters/otlp/otlptrace/otlptracehttp/doc.go
@@ -13,7 +13,62 @@
 // limitations under the License.
 
 /*
-Package otlptracehttp a client that sends traces to the collector using HTTP
-with binary protobuf payloads.
+Package otlptracehttp provides an OTLP span exporter using HTTP with protobuf payloads.
+By default the telemetry is sent to https://localhost:4318/v1/traces.
+
+Exporter should be created using [New].
+
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT (default: "https://localhost:4318") -
+target base URL ("/v1/traces" is appended) to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+environment variable and by [WithEndpoint], [WithInsecure] options.
+
+OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4318/v1/traces") -
+target URL to which the exporter sends telemetry.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+The configuration can be overridden by [WithEndpoint], [WithInsecure], [WithURLPath] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
+key-value pairs used as headers associated with HTTP requests.
+The value is expected to be represented in a format matching to the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) -
+the compression strategy the exporter uses to compress the HTTP body.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompression] option.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/chain trust for the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSClientConfig] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
 */
 package otlptracehttp // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
diff --git a/exporters/otlp/otlptrace/otlptracehttp/example_test.go b/exporters/otlp/otlptrace/otlptracehttp/example_test.go
index 56f78af67ef..d67bdf5afea 100644
--- a/exporters/otlp/otlptrace/otlptracehttp/example_test.go
+++ b/exporters/otlp/otlptrace/otlptracehttp/example_test.go
@@ -16,83 +16,27 @@ package otlptracehttp_test
 
 import (
 	"context"
-	"fmt"
-	"log"
 
 	"go.opentelemetry.io/otel"
-	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
 	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
-	"go.opentelemetry.io/otel/sdk/resource"
-	sdktrace "go.opentelemetry.io/otel/sdk/trace"
-	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
-	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/sdk/trace"
 )
 
-const (
-	instrumentationName    = "github.com/instrumentron"
-	instrumentationVersion = "v0.1.0"
-)
-
-var (
-	tracer = otel.GetTracerProvider().Tracer(
-		instrumentationName,
-		trace.WithInstrumentationVersion(instrumentationVersion),
-		trace.WithSchemaURL(semconv.SchemaURL),
-	)
-)
-
-func add(ctx context.Context, x, y int64) int64 {
-	var span trace.Span
-	_, span = tracer.Start(ctx, "Addition")
-	defer span.End()
-
-	return x + y
-}
-
-func multiply(ctx context.Context, x, y int64) int64 {
-	var span trace.Span
-	_, span = tracer.Start(ctx, "Multiplication")
-	defer span.End()
-
-	return x * y
-}
-
-func newResource() *resource.Resource {
-	return resource.NewWithAttributes(
-		semconv.SchemaURL,
-		semconv.ServiceName("otlptrace-example"),
-		semconv.ServiceVersion("0.0.1"),
-	)
-}
-
-func installExportPipeline(ctx context.Context) (func(context.Context) error, error) {
-	client := otlptracehttp.NewClient()
-	exporter, err := otlptrace.New(ctx, client)
-	if err != nil {
-		return nil, fmt.Errorf("creating OTLP trace exporter: %w", err)
-	}
-
-	tracerProvider := sdktrace.NewTracerProvider(
-		sdktrace.WithBatcher(exporter),
-		sdktrace.WithResource(newResource()),
-	)
-	otel.SetTracerProvider(tracerProvider)
-
-	return tracerProvider.Shutdown, nil
-}
-
 func Example() {
 	ctx := context.Background()
-	// Registers a tracer Provider globally.
-	shutdown, err := installExportPipeline(ctx)
+	exp, err := otlptracehttp.New(ctx)
 	if err != nil {
-		log.Fatal(err)
+		panic(err)
 	}
+
+	tracerProvider := trace.NewTracerProvider(trace.WithBatcher(exp))
 	defer func() {
-		if err := shutdown(ctx); err != nil {
-			log.Fatal(err)
+		if err := tracerProvider.Shutdown(ctx); err != nil {
+			panic(err)
 		}
 	}()
+	otel.SetTracerProvider(tracerProvider)
 
-	log.Println("the answer is", add(ctx, multiply(ctx, multiply(ctx, 2, 2), 10), 2))
+	// From here, the tracerProvider can be used by instrumentation to collect
+	// telemetry.
 }
diff --git a/exporters/otlp/otlptrace/otlptracehttp/go.mod b/exporters/otlp/otlptrace/otlptracehttp/go.mod
index f891821c536..0c0be7bc70f 100644
--- a/exporters/otlp/otlptrace/otlptracehttp/go.mod
+++ b/exporters/otlp/otlptrace/otlptracehttp/go.mod
@@ -1,32 +1,33 @@
 module go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
 
-go 1.19
+go 1.20
 
 require (
 	github.com/cenkalti/backoff/v4 v4.2.1
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/trace v1.16.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/trace v1.21.0
 	go.opentelemetry.io/proto/otlp v1.0.0
-	google.golang.org/grpc v1.57.0
+	google.golang.org/grpc v1.59.0
 	google.golang.org/protobuf v1.31.0
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
+	github.com/kr/text v0.2.0 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	golang.org/x/net v0.10.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
-	golang.org/x/text v0.9.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	golang.org/x/net v0.17.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
+	golang.org/x/text v0.13.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
diff --git a/exporters/otlp/otlptrace/otlptracehttp/go.sum b/exporters/otlp/otlptrace/otlptracehttp/go.sum
index 0bace07286a..33ae39b89cd 100644
--- a/exporters/otlp/otlptrace/otlptracehttp/go.sum
+++ b/exporters/otlp/otlptrace/otlptracehttp/go.sum
@@ -1,22 +1,24 @@
 github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
 github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
+github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -24,20 +26,20 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
 go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
-golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e h1:Ao9GzfUMPH3zjVfzXG5rlWlk+Q8MXWKwWpwVQE1MXfw=
-google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc h1:kVKPf/IiYSBWEWtkIn6wZXwWGCnLKcC8oWfZvXjsGnM=
-google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
-google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
-google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
+google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
+google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
+google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
diff --git a/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go b/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go
index 5e9e8185d15..8016b7a0b88 100644
--- a/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go
+++ b/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig.go
@@ -174,13 +174,13 @@ func stringToHeader(value string) map[string]string {
 			global.Error(errors.New("missing '="), "parse headers", "input", header)
 			continue
 		}
-		name, err := url.QueryUnescape(n)
+		name, err := url.PathUnescape(n)
 		if err != nil {
 			global.Error(err, "escape header key", "key", n)
 			continue
 		}
 		trimmedName := strings.TrimSpace(name)
-		value, err := url.QueryUnescape(v)
+		value, err := url.PathUnescape(v)
 		if err != nil {
 			global.Error(err, "escape header value", "value", v)
 			continue
diff --git a/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig_test.go b/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig_test.go
index cec506208d5..6cbe0c7ab11 100644
--- a/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig_test.go
+++ b/exporters/otlp/otlptrace/otlptracehttp/internal/envconfig/envconfig_test.go
@@ -427,7 +427,12 @@ func TestStringToHeader(t *testing.T) {
 			want:  map[string]string{"userId": "alice"},
 		},
 		{
-			name:  "multiples headers encoded",
+			name:  "simple header conforms to RFC 3986 spec",
+			value: " userId = alice+test ",
+			want:  map[string]string{"userId": "alice+test"},
+		},
+		{
+			name:  "multiple headers encoded",
 			value: "userId=alice,serverNode=DF%3A28,isProduction=false",
 			want: map[string]string{
 				"userId":       "alice",
@@ -435,6 +440,16 @@ func TestStringToHeader(t *testing.T) {
 				"isProduction": "false",
 			},
 		},
+		{
+			name:  "multiple headers encoded per RFC 3986 spec",
+			value: "userId=alice+test,serverNode=DF%3A28,isProduction=false,namespace=localhost/test",
+			want: map[string]string{
+				"userId":       "alice+test",
+				"serverNode":   "DF:28",
+				"isProduction": "false",
+				"namespace":    "localhost/test",
+			},
+		},
 		{
 			name:  "invalid headers format",
 			value: "userId:alice",
diff --git a/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go b/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
index 9a595c36a62..8401bd7f155 100644
--- a/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
+++ b/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options.go
@@ -141,9 +141,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
 	if cfg.Traces.Compression == GzipCompression {
 		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
 	}
-	if len(cfg.DialOptions) != 0 {
-		cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
-	}
 	if cfg.ReconnectionPeriod != 0 {
 		p := grpc.ConnectParams{
 			Backoff:           backoff.DefaultConfig,
diff --git a/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options_test.go b/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options_test.go
index 5fbda6d460d..4c7a525a996 100644
--- a/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options_test.go
+++ b/exporters/otlp/otlptrace/otlptracehttp/internal/otlpconfig/options_test.go
@@ -201,7 +201,7 @@ func TestConfigs(t *testing.T) {
 			},
 			asserts: func(t *testing.T, c *Config, grpcOption bool) {
 				if grpcOption {
-					//TODO: make sure gRPC's credentials actually works
+					// TODO: make sure gRPC's credentials actually works
 					assert.NotNil(t, c.Traces.GRPCCredentials)
 				} else {
 					// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
diff --git a/exporters/otlp/otlptrace/otlptracehttp/mock_collector_test.go b/exporters/otlp/otlptrace/otlptracehttp/mock_collector_test.go
index 919a15fa4df..2b87215d183 100644
--- a/exporters/otlp/otlptrace/otlptracehttp/mock_collector_test.go
+++ b/exporters/otlp/otlptrace/otlptracehttp/mock_collector_test.go
@@ -25,6 +25,7 @@ import (
 	"net/http"
 	"sync"
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -241,7 +242,9 @@ func runMockCollector(t *testing.T, cfg mockCollectorConfig) *mockCollector {
 	mux := http.NewServeMux()
 	mux.Handle(cfg.TracesURLPath, http.HandlerFunc(m.serveTraces))
 	server := &http.Server{
-		Handler: mux,
+		Handler:      mux,
+		ReadTimeout:  10 * time.Second,
+		WriteTimeout: 10 * time.Second,
 	}
 	if cfg.WithTLS {
 		pem, err := generateWeakCertificate()
diff --git a/exporters/otlp/otlptrace/version.go b/exporters/otlp/otlptrace/version.go
index db70dc53143..8ee285b8d52 100644
--- a/exporters/otlp/otlptrace/version.go
+++ b/exporters/otlp/otlptrace/version.go
@@ -16,5 +16,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
 
 // Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
 func Version() string {
-	return "1.16.0"
+	return "1.21.0"
 }
diff --git a/exporters/prometheus/config.go b/exporters/prometheus/config.go
index dcaba515900..03ce27b131e 100644
--- a/exporters/prometheus/config.go
+++ b/exporters/prometheus/config.go
@@ -19,18 +19,20 @@ import (
 
 	"github.com/prometheus/client_golang/prometheus"
 
+	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/sdk/metric"
 )
 
 // config contains options for the exporter.
 type config struct {
-	registerer             prometheus.Registerer
-	disableTargetInfo      bool
-	withoutUnits           bool
-	withoutCounterSuffixes bool
-	aggregation            metric.AggregationSelector
-	disableScopeInfo       bool
-	namespace              string
+	registerer               prometheus.Registerer
+	disableTargetInfo        bool
+	withoutUnits             bool
+	withoutCounterSuffixes   bool
+	readerOpts               []metric.ManualReaderOption
+	disableScopeInfo         bool
+	namespace                string
+	resourceAttributesFilter attribute.Filter
 }
 
 // newConfig creates a validated config configured with options.
@@ -47,14 +49,6 @@ func newConfig(opts ...Option) config {
 	return cfg
 }
 
-func (cfg config) manualReaderOptions() []metric.ManualReaderOption {
-	opts := []metric.ManualReaderOption{}
-	if cfg.aggregation != nil {
-		opts = append(opts, metric.WithAggregationSelector(cfg.aggregation))
-	}
-	return opts
-}
-
 // Option sets exporter option values.
 type Option interface {
 	apply(config) config
@@ -81,7 +75,16 @@ func WithRegisterer(reg prometheus.Registerer) Option {
 // used.
 func WithAggregationSelector(agg metric.AggregationSelector) Option {
 	return optionFunc(func(cfg config) config {
-		cfg.aggregation = agg
+		cfg.readerOpts = append(cfg.readerOpts, metric.WithAggregationSelector(agg))
+		return cfg
+	})
+}
+
+// WithProducer configure the metric Producer the exporter will use as a source
+// of external metric data.
+func WithProducer(producer metric.Producer) Option {
+	return optionFunc(func(cfg config) config {
+		cfg.readerOpts = append(cfg.readerOpts, metric.WithProducer(producer))
 		return cfg
 	})
 }
@@ -111,7 +114,7 @@ func WithoutUnits() Option {
 	})
 }
 
-// WithoutUnits disables exporter's addition _total suffixes on counters.
+// WithoutCounterSuffixes disables exporter's addition _total suffixes on counters.
 //
 // By default, metric names include a _total suffix to follow Prometheus naming
 // conventions. For example, the counter metric happy.people would become
@@ -150,3 +153,14 @@ func WithNamespace(ns string) Option {
 		return cfg
 	})
 }
+
+// WithResourceAsConstantLabels configures the Exporter to add the resource attributes the
+// resourceFilter returns true for as attributes on all exported metrics.
+//
+// This does not affect the target info generated from resource attributes.
+func WithResourceAsConstantLabels(resourceFilter attribute.Filter) Option {
+	return optionFunc(func(cfg config) config {
+		cfg.resourceAttributesFilter = resourceFilter
+		return cfg
+	})
+}
diff --git a/exporters/prometheus/config_test.go b/exporters/prometheus/config_test.go
index 3e3ba9c1cb0..b759cfe98f5 100644
--- a/exporters/prometheus/config_test.go
+++ b/exporters/prometheus/config_test.go
@@ -15,18 +15,21 @@
 package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus"
 
 import (
+	"context"
 	"testing"
 
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/stretchr/testify/assert"
 
 	"go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
 )
 
 func TestNewConfig(t *testing.T) {
 	registry := prometheus.NewRegistry()
 
 	aggregationSelector := func(metric.InstrumentKind) metric.Aggregation { return nil }
+	producer := &noopProducer{}
 
 	testCases := []struct {
 		name       string
@@ -56,6 +59,17 @@ func TestNewConfig(t *testing.T) {
 			},
 			wantConfig: config{
 				registerer: prometheus.DefaultRegisterer,
+				readerOpts: []metric.ManualReaderOption{metric.WithAggregationSelector(aggregationSelector)},
+			},
+		},
+		{
+			name: "WithProducer",
+			options: []Option{
+				WithProducer(producer),
+			},
+			wantConfig: config{
+				registerer: prometheus.DefaultRegisterer,
+				readerOpts: []metric.ManualReaderOption{metric.WithProducer(producer)},
 			},
 		},
 		{
@@ -63,10 +77,15 @@ func TestNewConfig(t *testing.T) {
 			options: []Option{
 				WithRegisterer(registry),
 				WithAggregationSelector(aggregationSelector),
+				WithProducer(producer),
 			},
 
 			wantConfig: config{
 				registerer: registry,
+				readerOpts: []metric.ManualReaderOption{
+					metric.WithAggregationSelector(aggregationSelector),
+					metric.WithProducer(producer),
+				},
 			},
 		},
 		{
@@ -132,38 +151,18 @@ func TestNewConfig(t *testing.T) {
 	for _, tt := range testCases {
 		t.Run(tt.name, func(t *testing.T) {
 			cfg := newConfig(tt.options...)
-			// tested by TestConfigManualReaderOptions
-			cfg.aggregation = nil
+			// only check the length of readerOpts, since they are not comparable
+			assert.Equal(t, len(tt.wantConfig.readerOpts), len(cfg.readerOpts))
+			cfg.readerOpts = nil
+			tt.wantConfig.readerOpts = nil
 
 			assert.Equal(t, tt.wantConfig, cfg)
 		})
 	}
 }
 
-func TestConfigManualReaderOptions(t *testing.T) {
-	aggregationSelector := func(metric.InstrumentKind) metric.Aggregation { return nil }
-
-	testCases := []struct {
-		name            string
-		config          config
-		wantOptionCount int
-	}{
-		{
-			name:            "Default",
-			config:          config{},
-			wantOptionCount: 0,
-		},
+type noopProducer struct{}
 
-		{
-			name:            "WithAggregationSelector",
-			config:          config{aggregation: aggregationSelector},
-			wantOptionCount: 1,
-		},
-	}
-	for _, tt := range testCases {
-		t.Run(tt.name, func(t *testing.T) {
-			opts := tt.config.manualReaderOptions()
-			assert.Len(t, opts, tt.wantOptionCount)
-		})
-	}
+func (*noopProducer) Produce(ctx context.Context) ([]metricdata.ScopeMetrics, error) {
+	return nil, nil
 }
diff --git a/exporters/prometheus/exporter.go b/exporters/prometheus/exporter.go
index 81b12254f85..16df309be44 100644
--- a/exporters/prometheus/exporter.go
+++ b/exporters/prometheus/exporter.go
@@ -45,7 +45,11 @@ const (
 	scopeInfoDescription = "Instrumentation Scope metadata"
 )
 
-var scopeInfoKeys = [2]string{"otel_scope_name", "otel_scope_version"}
+var (
+	scopeInfoKeys = [2]string{"otel_scope_name", "otel_scope_version"}
+
+	errScopeInvalid = errors.New("invalid scope")
+)
 
 // Exporter is a Prometheus Exporter that embeds the OTel metric.Reader
 // interface for easy instantiation with a MeterProvider.
@@ -74,20 +78,29 @@ func (e *Exporter) MarshalLog() interface{} {
 
 var _ metric.Reader = &Exporter{}
 
+// keyVals is used to store resource attribute key value pairs.
+type keyVals struct {
+	keys []string
+	vals []string
+}
+
 // collector is used to implement prometheus.Collector.
 type collector struct {
 	reader metric.Reader
 
-	withoutUnits           bool
-	withoutCounterSuffixes bool
-	disableScopeInfo       bool
-	namespace              string
+	withoutUnits             bool
+	withoutCounterSuffixes   bool
+	disableScopeInfo         bool
+	namespace                string
+	resourceAttributesFilter attribute.Filter
 
 	mu                sync.Mutex // mu protects all members below from the concurrent access.
 	disableTargetInfo bool
 	targetInfo        prometheus.Metric
 	scopeInfos        map[instrumentation.Scope]prometheus.Metric
+	scopeInfosInvalid map[instrumentation.Scope]struct{}
 	metricFamilies    map[string]*dto.MetricFamily
+	resourceKeyVals   keyVals
 }
 
 // prometheus counters MUST have a _total suffix by default:
@@ -101,17 +114,19 @@ func New(opts ...Option) (*Exporter, error) {
 	// this assumes that the default temporality selector will always return cumulative.
 	// we only support cumulative temporality, so building our own reader enforces this.
 	// TODO (#3244): Enable some way to configure the reader, but not change temporality.
-	reader := metric.NewManualReader(cfg.manualReaderOptions()...)
+	reader := metric.NewManualReader(cfg.readerOpts...)
 
 	collector := &collector{
-		reader:                 reader,
-		disableTargetInfo:      cfg.disableTargetInfo,
-		withoutUnits:           cfg.withoutUnits,
-		withoutCounterSuffixes: cfg.withoutCounterSuffixes,
-		disableScopeInfo:       cfg.disableScopeInfo,
-		scopeInfos:             make(map[instrumentation.Scope]prometheus.Metric),
-		metricFamilies:         make(map[string]*dto.MetricFamily),
-		namespace:              cfg.namespace,
+		reader:                   reader,
+		disableTargetInfo:        cfg.disableTargetInfo,
+		withoutUnits:             cfg.withoutUnits,
+		withoutCounterSuffixes:   cfg.withoutCounterSuffixes,
+		disableScopeInfo:         cfg.disableScopeInfo,
+		scopeInfos:               make(map[instrumentation.Scope]prometheus.Metric),
+		scopeInfosInvalid:        make(map[instrumentation.Scope]struct{}),
+		metricFamilies:           make(map[string]*dto.MetricFamily),
+		namespace:                cfg.namespace,
+		resourceAttributesFilter: cfg.resourceAttributesFilter,
 	}
 
 	if err := cfg.registerer.Register(collector); err != nil {
@@ -142,8 +157,11 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
 	metrics := metricdata.ResourceMetrics{}
 	err := c.reader.Collect(context.TODO(), &metrics)
 	if err != nil {
+		if errors.Is(err, metric.ErrReaderShutdown) {
+			return
+		}
 		otel.Handle(err)
-		if err == metric.ErrReaderNotRegistered {
+		if errors.Is(err, metric.ErrReaderNotRegistered) {
 			return
 		}
 	}
@@ -172,11 +190,19 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
 		ch <- c.targetInfo
 	}
 
+	if c.resourceAttributesFilter != nil && len(c.resourceKeyVals.keys) == 0 {
+		c.createResourceAttributes(metrics.Resource)
+	}
+
 	for _, scopeMetrics := range metrics.ScopeMetrics {
 		var keys, values [2]string
 
 		if !c.disableScopeInfo {
 			scopeInfo, err := c.scopeInfo(scopeMetrics.Scope)
+			if err == errScopeInvalid {
+				// Do not report the same error multiple times.
+				continue
+			}
 			if err != nil {
 				otel.Handle(err)
 				continue
@@ -206,26 +232,26 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) {
 
 			switch v := m.Data.(type) {
 			case metricdata.Histogram[int64]:
-				addHistogramMetric(ch, v, m, keys, values, name)
+				addHistogramMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
 			case metricdata.Histogram[float64]:
-				addHistogramMetric(ch, v, m, keys, values, name)
+				addHistogramMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
 			case metricdata.Sum[int64]:
-				addSumMetric(ch, v, m, keys, values, name)
+				addSumMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
 			case metricdata.Sum[float64]:
-				addSumMetric(ch, v, m, keys, values, name)
+				addSumMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
 			case metricdata.Gauge[int64]:
-				addGaugeMetric(ch, v, m, keys, values, name)
+				addGaugeMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
 			case metricdata.Gauge[float64]:
-				addGaugeMetric(ch, v, m, keys, values, name)
+				addGaugeMetric(ch, v, m, keys, values, name, c.resourceKeyVals)
 			}
 		}
 	}
 }
 
-func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, ks, vs [2]string, name string) {
+func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogram metricdata.Histogram[N], m metricdata.Metrics, ks, vs [2]string, name string, resourceKV keyVals) {
 	// TODO(https://github.com/open-telemetry/opentelemetry-go/issues/3163): support exemplars
 	for _, dp := range histogram.DataPoints {
-		keys, values := getAttrs(dp.Attributes, ks, vs)
+		keys, values := getAttrs(dp.Attributes, ks, vs, resourceKV)
 
 		desc := prometheus.NewDesc(name, m.Description, keys, nil)
 		buckets := make(map[float64]uint64, len(dp.Bounds))
@@ -244,14 +270,14 @@ func addHistogramMetric[N int64 | float64](ch chan<- prometheus.Metric, histogra
 	}
 }
 
-func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata.Sum[N], m metricdata.Metrics, ks, vs [2]string, name string) {
+func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata.Sum[N], m metricdata.Metrics, ks, vs [2]string, name string, resourceKV keyVals) {
 	valueType := prometheus.CounterValue
 	if !sum.IsMonotonic {
 		valueType = prometheus.GaugeValue
 	}
 
 	for _, dp := range sum.DataPoints {
-		keys, values := getAttrs(dp.Attributes, ks, vs)
+		keys, values := getAttrs(dp.Attributes, ks, vs, resourceKV)
 
 		desc := prometheus.NewDesc(name, m.Description, keys, nil)
 		m, err := prometheus.NewConstMetric(desc, valueType, float64(dp.Value), values...)
@@ -263,9 +289,9 @@ func addSumMetric[N int64 | float64](ch chan<- prometheus.Metric, sum metricdata
 	}
 }
 
-func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metricdata.Gauge[N], m metricdata.Metrics, ks, vs [2]string, name string) {
+func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metricdata.Gauge[N], m metricdata.Metrics, ks, vs [2]string, name string, resourceKV keyVals) {
 	for _, dp := range gauge.DataPoints {
-		keys, values := getAttrs(dp.Attributes, ks, vs)
+		keys, values := getAttrs(dp.Attributes, ks, vs, resourceKV)
 
 		desc := prometheus.NewDesc(name, m.Description, keys, nil)
 		m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(dp.Value), values...)
@@ -280,7 +306,7 @@ func addGaugeMetric[N int64 | float64](ch chan<- prometheus.Metric, gauge metric
 // getAttrs parses the attribute.Set to two lists of matching Prometheus-style
 // keys and values. It sanitizes invalid characters and handles duplicate keys
 // (due to sanitization) by sorting and concatenating the values following the spec.
-func getAttrs(attrs attribute.Set, ks, vs [2]string) ([]string, []string) {
+func getAttrs(attrs attribute.Set, ks, vs [2]string, resourceKV keyVals) ([]string, []string) {
 	keysMap := make(map[string][]string)
 	itr := attrs.Iter()
 	for itr.Next() {
@@ -308,11 +334,17 @@ func getAttrs(attrs attribute.Set, ks, vs [2]string) ([]string, []string) {
 		keys = append(keys, ks[:]...)
 		values = append(values, vs[:]...)
 	}
+
+	for idx := range resourceKV.keys {
+		keys = append(keys, resourceKV.keys[idx])
+		values = append(values, resourceKV.vals[idx])
+	}
+
 	return keys, values
 }
 
 func createInfoMetric(name, description string, res *resource.Resource) (prometheus.Metric, error) {
-	keys, values := getAttrs(*res.Set(), [2]string{}, [2]string{})
+	keys, values := getAttrs(*res.Set(), [2]string{}, [2]string{}, keyVals{})
 	desc := prometheus.NewDesc(name, description, keys, nil)
 	return prometheus.NewConstMetric(desc, prometheus.GaugeValue, float64(1), values...)
 }
@@ -460,6 +492,15 @@ func (c *collector) metricType(m metricdata.Metrics) *dto.MetricType {
 	return nil
 }
 
+func (c *collector) createResourceAttributes(res *resource.Resource) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	resourceAttrs, _ := res.Set().Filter(c.resourceAttributesFilter)
+	resourceKeys, resourceValues := getAttrs(resourceAttrs, [2]string{}, [2]string{}, keyVals{})
+	c.resourceKeyVals = keyVals{keys: resourceKeys, vals: resourceValues}
+}
+
 func (c *collector) scopeInfo(scope instrumentation.Scope) (prometheus.Metric, error) {
 	c.mu.Lock()
 	defer c.mu.Unlock()
@@ -469,8 +510,13 @@ func (c *collector) scopeInfo(scope instrumentation.Scope) (prometheus.Metric, e
 		return scopeInfo, nil
 	}
 
+	if _, ok := c.scopeInfosInvalid[scope]; ok {
+		return nil, errScopeInvalid
+	}
+
 	scopeInfo, err := createScopeInfoMetric(scope)
 	if err != nil {
+		c.scopeInfosInvalid[scope] = struct{}{}
 		return nil, fmt.Errorf("cannot create scope info metric: %w", err)
 	}
 
diff --git a/exporters/prometheus/exporter_test.go b/exporters/prometheus/exporter_test.go
index 86cc8dc0b20..cb402e10ced 100644
--- a/exporters/prometheus/exporter_test.go
+++ b/exporters/prometheus/exporter_test.go
@@ -16,6 +16,8 @@ package prometheus
 
 import (
 	"context"
+	"errors"
+	"io"
 	"os"
 	"sync"
 	"testing"
@@ -25,6 +27,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
+	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/attribute"
 	otelmetric "go.opentelemetry.io/otel/metric"
 	"go.opentelemetry.io/otel/sdk/metric"
@@ -365,6 +368,46 @@ func TestPrometheusExporter(t *testing.T) {
 				counter.Add(ctx, 9, opt)
 			},
 		},
+		{
+			name:         "with resource attributes filter",
+			expectedFile: "testdata/with_resource_attributes_filter.txt",
+			options: []Option{
+				WithResourceAsConstantLabels(attribute.NewDenyKeysFilter()),
+			},
+			recordMetrics: func(ctx context.Context, meter otelmetric.Meter) {
+				opt := otelmetric.WithAttributes(
+					attribute.Key("A").String("B"),
+					attribute.Key("C").String("D"),
+					attribute.Key("E").Bool(true),
+					attribute.Key("F").Int(42),
+				)
+				counter, err := meter.Float64Counter("foo", otelmetric.WithDescription("a simple counter"))
+				require.NoError(t, err)
+				counter.Add(ctx, 5, opt)
+				counter.Add(ctx, 10.1, opt)
+				counter.Add(ctx, 9.8, opt)
+			},
+		},
+		{
+			name:         "with some resource attributes filter",
+			expectedFile: "testdata/with_allow_resource_attributes_filter.txt",
+			options: []Option{
+				WithResourceAsConstantLabels(attribute.NewAllowKeysFilter("service.name")),
+			},
+			recordMetrics: func(ctx context.Context, meter otelmetric.Meter) {
+				opt := otelmetric.WithAttributes(
+					attribute.Key("A").String("B"),
+					attribute.Key("C").String("D"),
+					attribute.Key("E").Bool(true),
+					attribute.Key("F").Int(42),
+				)
+				counter, err := meter.Float64Counter("foo", otelmetric.WithDescription("a simple counter"))
+				require.NoError(t, err)
+				counter.Add(ctx, 5, opt)
+				counter.Add(ctx, 5.9, opt)
+				counter.Add(ctx, 5.3, opt)
+			},
+		},
 	}
 
 	for _, tc := range testCases {
@@ -744,7 +787,7 @@ func TestDuplicateMetrics(t *testing.T) {
 
 			tc.recordMetrics(ctx, meterA, meterB)
 
-			var match = false
+			match := false
 			for _, filename := range tc.possibleExpectedFiles {
 				file, ferr := os.Open(filename)
 				require.NoError(t, ferr)
@@ -790,6 +833,14 @@ func TestCollectorConcurrentSafe(t *testing.T) {
 }
 
 func TestIncompatibleMeterName(t *testing.T) {
+	defer func(orig otel.ErrorHandler) {
+		otel.SetErrorHandler(orig)
+	}(otel.GetErrorHandler())
+
+	errs := []error{}
+	eh := otel.ErrorHandlerFunc(func(e error) { errs = append(errs, e) })
+	otel.SetErrorHandler(eh)
+
 	// This test checks that Prometheus exporter ignores
 	// when it encounters incompatible meter name.
 
@@ -815,4 +866,46 @@ func TestIncompatibleMeterName(t *testing.T) {
 
 	err = testutil.GatherAndCompare(registry, file)
 	require.NoError(t, err)
+
+	assert.Equal(t, 1, len(errs))
+
+	// A second collect shouldn't trigger new errors
+	_, err = file.Seek(0, io.SeekStart)
+	assert.NoError(t, err)
+	err = testutil.GatherAndCompare(registry, file)
+	require.NoError(t, err)
+	assert.Equal(t, 1, len(errs))
+}
+
+func TestShutdownExporter(t *testing.T) {
+	var handledError error
+	eh := otel.ErrorHandlerFunc(func(e error) { handledError = errors.Join(handledError, e) })
+	otel.SetErrorHandler(eh)
+
+	ctx := context.Background()
+	registry := prometheus.NewRegistry()
+
+	for i := 0; i < 3; i++ {
+		exporter, err := New(WithRegisterer(registry))
+		require.NoError(t, err)
+		provider := metric.NewMeterProvider(
+			metric.WithResource(resource.Default()),
+			metric.WithReader(exporter))
+		meter := provider.Meter("testmeter")
+		cnt, err := meter.Int64Counter("foo")
+		require.NoError(t, err)
+		cnt.Add(ctx, 100)
+
+		// verify that metrics added to a previously shutdown MeterProvider
+		// do not conflict with metrics added in this loop.
+		_, err = registry.Gather()
+		require.NoError(t, err)
+
+		// Shutdown should cause future prometheus Gather() calls to no longer
+		// include metrics from this loop's MeterProvider.
+		err = provider.Shutdown(ctx)
+		require.NoError(t, err)
+	}
+	// ensure we aren't unnecessarily logging errors from the shutdown MeterProvider
+	require.NoError(t, handledError)
 }
diff --git a/exporters/prometheus/go.mod b/exporters/prometheus/go.mod
index 8c412831e9b..4c6b075908c 100644
--- a/exporters/prometheus/go.mod
+++ b/exporters/prometheus/go.mod
@@ -1,15 +1,15 @@
 module go.opentelemetry.io/otel/exporters/prometheus
 
-go 1.19
+go 1.20
 
 require (
-	github.com/prometheus/client_golang v1.16.0
-	github.com/prometheus/client_model v0.4.0
+	github.com/prometheus/client_golang v1.17.0
+	github.com/prometheus/client_model v0.5.0
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/metric v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/sdk/metric v0.39.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/metric v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/sdk/metric v1.21.0
 	google.golang.org/protobuf v1.31.0
 )
 
@@ -17,17 +17,16 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/common v0.42.0 // indirect
-	github.com/prometheus/procfs v0.10.1 // indirect
-	github.com/rogpeppe/go-internal v1.10.0 // indirect
-	go.opentelemetry.io/otel/trace v1.16.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
+	github.com/prometheus/common v0.44.0 // indirect
+	github.com/prometheus/procfs v0.11.1 // indirect
+	go.opentelemetry.io/otel/trace v1.21.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
diff --git a/exporters/prometheus/go.sum b/exporters/prometheus/go.sum
index 78b97014e2c..efd6536daa6 100644
--- a/exporters/prometheus/go.sum
+++ b/exporters/prometheus/go.sum
@@ -6,8 +6,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -15,7 +15,7 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
@@ -23,21 +23,20 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk
 github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
-github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
-github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
-github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
-github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
-github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
-github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
-github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
+github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
+github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
+github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
diff --git a/exporters/prometheus/testdata/with_allow_resource_attributes_filter.txt b/exporters/prometheus/testdata/with_allow_resource_attributes_filter.txt
new file mode 100755
index 00000000000..9a4ca7793ea
--- /dev/null
+++ b/exporters/prometheus/testdata/with_allow_resource_attributes_filter.txt
@@ -0,0 +1,9 @@
+# HELP foo_total a simple counter
+# TYPE foo_total counter
+foo_total{A="B",C="D",E="true",F="42",otel_scope_name="testmeter",otel_scope_version="v0.1.0",service_name="prometheus_test"} 16.2
+# HELP otel_scope_info Instrumentation Scope metadata
+# TYPE otel_scope_info gauge
+otel_scope_info{otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1
+# HELP target_info Target metadata
+# TYPE target_info gauge
+target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1
diff --git a/exporters/prometheus/testdata/with_resource_attributes_filter.txt b/exporters/prometheus/testdata/with_resource_attributes_filter.txt
new file mode 100755
index 00000000000..b3ab4f80fab
--- /dev/null
+++ b/exporters/prometheus/testdata/with_resource_attributes_filter.txt
@@ -0,0 +1,9 @@
+# HELP foo_total a simple counter
+# TYPE foo_total counter
+foo_total{A="B",C="D",E="true",F="42",otel_scope_name="testmeter",otel_scope_version="v0.1.0",service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 24.9
+# HELP otel_scope_info Instrumentation Scope metadata
+# TYPE otel_scope_info gauge
+otel_scope_info{otel_scope_name="testmeter",otel_scope_version="v0.1.0"} 1
+# HELP target_info Target metadata
+# TYPE target_info gauge
+target_info{service_name="prometheus_test",telemetry_sdk_language="go",telemetry_sdk_name="opentelemetry",telemetry_sdk_version="latest"} 1
diff --git a/exporters/stdout/stdoutmetric/config.go b/exporters/stdout/stdoutmetric/config.go
index 6189c019f37..cac5afeeb67 100644
--- a/exporters/stdout/stdoutmetric/config.go
+++ b/exporters/stdout/stdoutmetric/config.go
@@ -15,6 +15,7 @@ package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdout
 
 import (
 	"encoding/json"
+	"io"
 	"os"
 
 	"go.opentelemetry.io/otel/sdk/metric"
@@ -22,6 +23,7 @@ import (
 
 // config contains options for the exporter.
 type config struct {
+	prettyPrint         bool
 	encoder             *encoderHolder
 	temporalitySelector metric.TemporalitySelector
 	aggregationSelector metric.AggregationSelector
@@ -37,10 +39,15 @@ func newConfig(options ...Option) config {
 
 	if cfg.encoder == nil {
 		enc := json.NewEncoder(os.Stdout)
-		enc.SetIndent("", "\t")
 		cfg.encoder = &encoderHolder{encoder: enc}
 	}
 
+	if cfg.prettyPrint {
+		if e, ok := cfg.encoder.encoder.(*json.Encoder); ok {
+			e.SetIndent("", "\t")
+		}
+	}
+
 	if cfg.temporalitySelector == nil {
 		cfg.temporalitySelector = metric.DefaultTemporalitySelector
 	}
@@ -74,6 +81,22 @@ func WithEncoder(encoder Encoder) Option {
 	})
 }
 
+// WithWriter sets the export stream destination.
+// Using this option overrides any previously set encoder.
+func WithWriter(w io.Writer) Option {
+	return WithEncoder(json.NewEncoder(w))
+}
+
+// WithPrettyPrint prettifies the emitted output.
+// This option only works if the encoder is a *json.Encoder, as is the case
+// when using `WithWriter`.
+func WithPrettyPrint() Option {
+	return optionFunc(func(c config) config {
+		c.prettyPrint = true
+		return c
+	})
+}
+
 // WithTemporalitySelector sets the TemporalitySelector the exporter will use
 // to determine the Temporality of an instrument based on its kind. If this
 // option is not used, the exporter will use the DefaultTemporalitySelector
diff --git a/exporters/stdout/stdoutmetric/example_test.go b/exporters/stdout/stdoutmetric/example_test.go
index a3e7377914f..6cc978ebbed 100644
--- a/exporters/stdout/stdoutmetric/example_test.go
+++ b/exporters/stdout/stdoutmetric/example_test.go
@@ -31,7 +31,7 @@ import (
 
 var (
 	// Sat Jan 01 2000 00:00:00 GMT+0000.
-	now = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0))
+	now = time.Date(2000, time.January, 0o1, 0, 0, 0, 0, time.FixedZone("GMT", 0))
 
 	res = resource.NewSchemaless(
 		semconv.ServiceName("stdoutmetric-example"),
@@ -41,7 +41,7 @@ var (
 		Resource: res,
 		ScopeMetrics: []metricdata.ScopeMetrics{
 			{
-				Scope: instrumentation.Scope{Name: "example", Version: "v0.0.1"},
+				Scope: instrumentation.Scope{Name: "example", Version: "0.0.1"},
 				Metrics: []metricdata.Metrics{
 					{
 						Name:        "requests",
@@ -158,7 +158,7 @@ func Example() {
 	// Ensure the periodic reader is cleaned up by shutting down the sdk.
 	_ = sdk.Shutdown(ctx)
 
-	//Output:
+	// Output:
 	// {
 	//   "Resource": [
 	//     {
@@ -173,7 +173,7 @@ func Example() {
 	//     {
 	//       "Scope": {
 	//         "Name": "example",
-	//         "Version": "v0.0.1",
+	//         "Version": "0.0.1",
 	//         "SchemaURL": ""
 	//       },
 	//       "Metrics": [
diff --git a/exporters/stdout/stdoutmetric/exporter.go b/exporters/stdout/stdoutmetric/exporter.go
index c223a84da59..faedf9a9100 100644
--- a/exporters/stdout/stdoutmetric/exporter.go
+++ b/exporters/stdout/stdoutmetric/exporter.go
@@ -106,9 +106,7 @@ func redactTimestamps(orig *metricdata.ResourceMetrics) {
 	}
 }
 
-var (
-	errUnknownAggType = errors.New("unknown aggregation type")
-)
+var errUnknownAggType = errors.New("unknown aggregation type")
 
 func redactAggregationTimestamps(orig metricdata.Aggregation) metricdata.Aggregation {
 	switch a := orig.(type) {
diff --git a/exporters/stdout/stdoutmetric/exporter_test.go b/exporters/stdout/stdoutmetric/exporter_test.go
index 71679d623a1..2dbfe6357a2 100644
--- a/exporters/stdout/stdoutmetric/exporter_test.go
+++ b/exporters/stdout/stdoutmetric/exporter_test.go
@@ -15,6 +15,7 @@
 package stdoutmetric_test // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
 
 import (
+	"bytes"
 	"context"
 	"encoding/json"
 	"io"
@@ -103,6 +104,43 @@ func deltaSelector(metric.InstrumentKind) metricdata.Temporality {
 	return metricdata.DeltaTemporality
 }
 
+func TestExportWithOptions(t *testing.T) {
+	var (
+		data = new(metricdata.ResourceMetrics)
+		ctx  = context.Background()
+	)
+
+	for _, tt := range []struct {
+		name string
+		opts []stdoutmetric.Option
+
+		expectedData string
+	}{
+		{
+			name:         "with no options",
+			expectedData: "{\"Resource\":null,\"ScopeMetrics\":null}\n",
+		},
+		{
+			name: "with pretty print",
+			opts: []stdoutmetric.Option{
+				stdoutmetric.WithPrettyPrint(),
+			},
+			expectedData: "{\n\t\"Resource\": null,\n\t\"ScopeMetrics\": null\n}\n",
+		},
+	} {
+		t.Run(tt.name, func(t *testing.T) {
+			var b bytes.Buffer
+			opts := append(tt.opts, stdoutmetric.WithWriter(&b))
+
+			exp, err := stdoutmetric.New(opts...)
+			require.NoError(t, err)
+			require.NoError(t, exp.Export(ctx, data))
+
+			assert.Equal(t, tt.expectedData, b.String())
+		})
+	}
+}
+
 func TestTemporalitySelector(t *testing.T) {
 	exp, err := stdoutmetric.New(
 		testEncoderOption(),
diff --git a/exporters/stdout/stdoutmetric/go.mod b/exporters/stdout/stdoutmetric/go.mod
index 2d269fbd80b..f00e7131d84 100644
--- a/exporters/stdout/stdoutmetric/go.mod
+++ b/exporters/stdout/stdoutmetric/go.mod
@@ -1,22 +1,22 @@
 module go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
 
-go 1.19
+go 1.20
 
 require (
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/sdk/metric v0.39.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/sdk/metric v1.21.0
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	go.opentelemetry.io/otel/trace v1.16.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	go.opentelemetry.io/otel/trace v1.21.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
diff --git a/exporters/stdout/stdoutmetric/go.sum b/exporters/stdout/stdoutmetric/go.sum
index 594aa686923..94020957893 100644
--- a/exporters/stdout/stdoutmetric/go.sum
+++ b/exporters/stdout/stdoutmetric/go.sum
@@ -1,17 +1,17 @@
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/exporters/stdout/stdouttrace/config.go b/exporters/stdout/stdouttrace/config.go
index 2cb534a75e9..2d765435495 100644
--- a/exporters/stdout/stdouttrace/config.go
+++ b/exporters/stdout/stdouttrace/config.go
@@ -71,7 +71,7 @@ func (o writerOption) apply(cfg config) config {
 	return cfg
 }
 
-// WithPrettyPrint sets the export stream format to use JSON.
+// WithPrettyPrint prettifies the emitted output.
 func WithPrettyPrint() Option {
 	return prettyPrintOption(true)
 }
diff --git a/exporters/stdout/stdouttrace/example_test.go b/exporters/stdout/stdouttrace/example_test.go
index 5e3a5064568..0abd72d10e9 100644
--- a/exporters/stdout/stdouttrace/example_test.go
+++ b/exporters/stdout/stdouttrace/example_test.go
@@ -29,15 +29,13 @@ import (
 
 const (
 	instrumentationName    = "github.com/instrumentron"
-	instrumentationVersion = "v0.1.0"
+	instrumentationVersion = "0.1.0"
 )
 
-var (
-	tracer = otel.GetTracerProvider().Tracer(
-		instrumentationName,
-		trace.WithInstrumentationVersion(instrumentationVersion),
-		trace.WithSchemaURL(semconv.SchemaURL),
-	)
+var tracer = otel.GetTracerProvider().Tracer(
+	instrumentationName,
+	trace.WithInstrumentationVersion(instrumentationVersion),
+	trace.WithSchemaURL(semconv.SchemaURL),
 )
 
 func add(ctx context.Context, x, y int64) int64 {
diff --git a/exporters/stdout/stdouttrace/go.mod b/exporters/stdout/stdouttrace/go.mod
index edfdded4033..d5425c4a842 100644
--- a/exporters/stdout/stdouttrace/go.mod
+++ b/exporters/stdout/stdouttrace/go.mod
@@ -1,6 +1,6 @@
 module go.opentelemetry.io/otel/exporters/stdout/stdouttrace
 
-go 1.19
+go 1.20
 
 replace (
 	go.opentelemetry.io/otel => ../../..
@@ -9,18 +9,18 @@ replace (
 
 require (
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/trace v1.16.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/trace v1.21.0
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
diff --git a/exporters/stdout/stdouttrace/go.sum b/exporters/stdout/stdouttrace/go.sum
index 594aa686923..94020957893 100644
--- a/exporters/stdout/stdouttrace/go.sum
+++ b/exporters/stdout/stdouttrace/go.sum
@@ -1,17 +1,17 @@
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/exporters/zipkin/go.mod b/exporters/zipkin/go.mod
index ff97e81e252..efc75303640 100644
--- a/exporters/zipkin/go.mod
+++ b/exporters/zipkin/go.mod
@@ -1,23 +1,23 @@
 module go.opentelemetry.io/otel/exporters/zipkin
 
-go 1.19
+go 1.20
 
 require (
-	github.com/go-logr/logr v1.2.4
+	github.com/go-logr/logr v1.3.0
 	github.com/go-logr/stdr v1.2.2
-	github.com/google/go-cmp v0.5.9
+	github.com/google/go-cmp v0.6.0
 	github.com/openzipkin/zipkin-go v0.4.2
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
-	go.opentelemetry.io/otel/trace v1.16.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
+	go.opentelemetry.io/otel/trace v1.21.0
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
diff --git a/exporters/zipkin/go.sum b/exporters/zipkin/go.sum
index 1c34c9c928e..6d37533b753 100644
--- a/exporters/zipkin/go.sum
+++ b/exporters/zipkin/go.sum
@@ -1,20 +1,20 @@
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA=
 github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/exporters/zipkin/internal/internaltest/harness.go b/exporters/zipkin/internal/internaltest/harness.go
index d8118d4f3b5..53e8b3bf113 100644
--- a/exporters/zipkin/internal/internaltest/harness.go
+++ b/exporters/zipkin/internal/internaltest/harness.go
@@ -263,7 +263,7 @@ func (h *Harness) TestTracer(subjectFactory func() trace.Tracer) {
 }
 
 func (h *Harness) testSpan(tracerFactory func() trace.Tracer) {
-	var methods = map[string]func(span trace.Span){
+	methods := map[string]func(span trace.Span){
 		"#End": func(span trace.Span) {
 			span.End()
 		},
@@ -283,7 +283,7 @@ func (h *Harness) testSpan(tracerFactory func() trace.Tracer) {
 			span.SetAttributes(attribute.String("key1", "value"), attribute.Int("key2", 123))
 		},
 	}
-	var mechanisms = map[string]func() trace.Span{
+	mechanisms := map[string]func() trace.Span{
 		"Span created via Tracer#Start": func() trace.Span {
 			tracer := tracerFactory()
 			_, subject := tracer.Start(context.Background(), "test")
diff --git a/exporters/zipkin/internal/internaltest/text_map_carrier_test.go b/exporters/zipkin/internal/internaltest/text_map_carrier_test.go
index faf713cc2d0..086c8af26ea 100644
--- a/exporters/zipkin/internal/internaltest/text_map_carrier_test.go
+++ b/exporters/zipkin/internal/internaltest/text_map_carrier_test.go
@@ -22,9 +22,7 @@ import (
 	"testing"
 )
 
-var (
-	key, value = "test", "true"
-)
+var key, value = "test", "true"
 
 func TestTextMapCarrierKeys(t *testing.T) {
 	tmc := NewTextMapCarrier(map[string]string{key: value})
diff --git a/exporters/zipkin/internal/matchers/expectation.go b/exporters/zipkin/internal/matchers/expectation.go
index 411300d5819..f890ca92790 100644
--- a/exporters/zipkin/internal/matchers/expectation.go
+++ b/exporters/zipkin/internal/matchers/expectation.go
@@ -27,9 +27,7 @@ import (
 	"time"
 )
 
-var (
-	stackTracePruneRE = regexp.MustCompile(`runtime\/debug|testing|internal\/matchers`)
-)
+var stackTracePruneRE = regexp.MustCompile(`runtime\/debug|testing|internal\/matchers`)
 
 type Expectation struct {
 	t      *testing.T
diff --git a/exporters/zipkin/zipkin_test.go b/exporters/zipkin/zipkin_test.go
index cc90b5f789d..ad720a042b2 100644
--- a/exporters/zipkin/zipkin_test.go
+++ b/exporters/zipkin/zipkin_test.go
@@ -120,7 +120,9 @@ func startMockZipkinCollector(t *testing.T) *mockZipkinCollector {
 	require.NoError(t, err)
 	collector.url = fmt.Sprintf("http://%s", listener.Addr().String())
 	server := &http.Server{
-		Handler: http.HandlerFunc(collector.handler),
+		Handler:      http.HandlerFunc(collector.handler),
+		ReadTimeout:  10 * time.Second,
+		WriteTimeout: 10 * time.Second,
 	}
 	collector.server = server
 	wg := &sync.WaitGroup{}
diff --git a/go.mod b/go.mod
index d6483834b99..e8b192b3f78 100644
--- a/go.mod
+++ b/go.mod
@@ -1,14 +1,14 @@
 module go.opentelemetry.io/otel
 
-go 1.19
+go 1.20
 
 require (
-	github.com/go-logr/logr v1.2.4
+	github.com/go-logr/logr v1.3.0
 	github.com/go-logr/stdr v1.2.2
-	github.com/google/go-cmp v0.5.9
+	github.com/google/go-cmp v0.6.0
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel/metric v1.16.0
-	go.opentelemetry.io/otel/trace v1.16.0
+	go.opentelemetry.io/otel/metric v1.21.0
+	go.opentelemetry.io/otel/trace v1.21.0
 )
 
 require (
diff --git a/go.sum b/go.sum
index d58b1a3b04c..95b35a74664 100644
--- a/go.sum
+++ b/go.sum
@@ -1,12 +1,12 @@
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
diff --git a/internal/attribute/attribute_test.go b/internal/attribute/attribute_test.go
index 3cba91173de..05f4a0a5397 100644
--- a/internal/attribute/attribute_test.go
+++ b/internal/attribute/attribute_test.go
@@ -39,16 +39,20 @@ var wrapBoolSliceValue = func(v interface{}) interface{} {
 	}
 	return nil
 }
+
 var wrapStringSliceValue = func(v interface{}) interface{} {
 	if vi, ok := v.([]string); ok {
 		return StringSliceValue(vi)
 	}
 	return nil
 }
-var wrapAsBoolSlice = func(v interface{}) interface{} { return AsBoolSlice(v) }
-var wrapAsInt64Slice = func(v interface{}) interface{} { return AsInt64Slice(v) }
-var wrapAsFloat64Slice = func(v interface{}) interface{} { return AsFloat64Slice(v) }
-var wrapAsStringSlice = func(v interface{}) interface{} { return AsStringSlice(v) }
+
+var (
+	wrapAsBoolSlice    = func(v interface{}) interface{} { return AsBoolSlice(v) }
+	wrapAsInt64Slice   = func(v interface{}) interface{} { return AsInt64Slice(v) }
+	wrapAsFloat64Slice = func(v interface{}) interface{} { return AsFloat64Slice(v) }
+	wrapAsStringSlice  = func(v interface{}) interface{} { return AsStringSlice(v) }
+)
 
 func TestSliceValue(t *testing.T) {
 	type args struct {
diff --git a/internal/global/instruments.go b/internal/global/instruments.go
index a33eded872a..ebb13c20678 100644
--- a/internal/global/instruments.go
+++ b/internal/global/instruments.go
@@ -34,11 +34,13 @@ type afCounter struct {
 	name string
 	opts []metric.Float64ObservableCounterOption
 
-	delegate atomic.Value //metric.Float64ObservableCounter
+	delegate atomic.Value // metric.Float64ObservableCounter
 }
 
-var _ unwrapper = (*afCounter)(nil)
-var _ metric.Float64ObservableCounter = (*afCounter)(nil)
+var (
+	_ unwrapper                       = (*afCounter)(nil)
+	_ metric.Float64ObservableCounter = (*afCounter)(nil)
+)
 
 func (i *afCounter) setDelegate(m metric.Meter) {
 	ctr, err := m.Float64ObservableCounter(i.name, i.opts...)
@@ -63,11 +65,13 @@ type afUpDownCounter struct {
 	name string
 	opts []metric.Float64ObservableUpDownCounterOption
 
-	delegate atomic.Value //metric.Float64ObservableUpDownCounter
+	delegate atomic.Value // metric.Float64ObservableUpDownCounter
 }
 
-var _ unwrapper = (*afUpDownCounter)(nil)
-var _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
+var (
+	_ unwrapper                             = (*afUpDownCounter)(nil)
+	_ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
+)
 
 func (i *afUpDownCounter) setDelegate(m metric.Meter) {
 	ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...)
@@ -92,11 +96,13 @@ type afGauge struct {
 	name string
 	opts []metric.Float64ObservableGaugeOption
 
-	delegate atomic.Value //metric.Float64ObservableGauge
+	delegate atomic.Value // metric.Float64ObservableGauge
 }
 
-var _ unwrapper = (*afGauge)(nil)
-var _ metric.Float64ObservableGauge = (*afGauge)(nil)
+var (
+	_ unwrapper                     = (*afGauge)(nil)
+	_ metric.Float64ObservableGauge = (*afGauge)(nil)
+)
 
 func (i *afGauge) setDelegate(m metric.Meter) {
 	ctr, err := m.Float64ObservableGauge(i.name, i.opts...)
@@ -121,11 +127,13 @@ type aiCounter struct {
 	name string
 	opts []metric.Int64ObservableCounterOption
 
-	delegate atomic.Value //metric.Int64ObservableCounter
+	delegate atomic.Value // metric.Int64ObservableCounter
 }
 
-var _ unwrapper = (*aiCounter)(nil)
-var _ metric.Int64ObservableCounter = (*aiCounter)(nil)
+var (
+	_ unwrapper                     = (*aiCounter)(nil)
+	_ metric.Int64ObservableCounter = (*aiCounter)(nil)
+)
 
 func (i *aiCounter) setDelegate(m metric.Meter) {
 	ctr, err := m.Int64ObservableCounter(i.name, i.opts...)
@@ -150,11 +158,13 @@ type aiUpDownCounter struct {
 	name string
 	opts []metric.Int64ObservableUpDownCounterOption
 
-	delegate atomic.Value //metric.Int64ObservableUpDownCounter
+	delegate atomic.Value // metric.Int64ObservableUpDownCounter
 }
 
-var _ unwrapper = (*aiUpDownCounter)(nil)
-var _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
+var (
+	_ unwrapper                           = (*aiUpDownCounter)(nil)
+	_ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
+)
 
 func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
 	ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...)
@@ -179,11 +189,13 @@ type aiGauge struct {
 	name string
 	opts []metric.Int64ObservableGaugeOption
 
-	delegate atomic.Value //metric.Int64ObservableGauge
+	delegate atomic.Value // metric.Int64ObservableGauge
 }
 
-var _ unwrapper = (*aiGauge)(nil)
-var _ metric.Int64ObservableGauge = (*aiGauge)(nil)
+var (
+	_ unwrapper                   = (*aiGauge)(nil)
+	_ metric.Int64ObservableGauge = (*aiGauge)(nil)
+)
 
 func (i *aiGauge) setDelegate(m metric.Meter) {
 	ctr, err := m.Int64ObservableGauge(i.name, i.opts...)
@@ -208,7 +220,7 @@ type sfCounter struct {
 	name string
 	opts []metric.Float64CounterOption
 
-	delegate atomic.Value //metric.Float64Counter
+	delegate atomic.Value // metric.Float64Counter
 }
 
 var _ metric.Float64Counter = (*sfCounter)(nil)
@@ -234,7 +246,7 @@ type sfUpDownCounter struct {
 	name string
 	opts []metric.Float64UpDownCounterOption
 
-	delegate atomic.Value //metric.Float64UpDownCounter
+	delegate atomic.Value // metric.Float64UpDownCounter
 }
 
 var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil)
@@ -260,7 +272,7 @@ type sfHistogram struct {
 	name string
 	opts []metric.Float64HistogramOption
 
-	delegate atomic.Value //metric.Float64Histogram
+	delegate atomic.Value // metric.Float64Histogram
 }
 
 var _ metric.Float64Histogram = (*sfHistogram)(nil)
@@ -286,7 +298,7 @@ type siCounter struct {
 	name string
 	opts []metric.Int64CounterOption
 
-	delegate atomic.Value //metric.Int64Counter
+	delegate atomic.Value // metric.Int64Counter
 }
 
 var _ metric.Int64Counter = (*siCounter)(nil)
@@ -312,7 +324,7 @@ type siUpDownCounter struct {
 	name string
 	opts []metric.Int64UpDownCounterOption
 
-	delegate atomic.Value //metric.Int64UpDownCounter
+	delegate atomic.Value // metric.Int64UpDownCounter
 }
 
 var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil)
@@ -338,7 +350,7 @@ type siHistogram struct {
 	name string
 	opts []metric.Int64HistogramOption
 
-	delegate atomic.Value //metric.Int64Histogram
+	delegate atomic.Value // metric.Int64Histogram
 }
 
 var _ metric.Int64Histogram = (*siHistogram)(nil)
diff --git a/internal/global/instruments_test.go b/internal/global/instruments_test.go
index c0ab914b6c2..7808f77da30 100644
--- a/internal/global/instruments_test.go
+++ b/internal/global/instruments_test.go
@@ -168,9 +168,11 @@ type testCountingFloatInstrument struct {
 func (i *testCountingFloatInstrument) observe() {
 	i.count++
 }
+
 func (i *testCountingFloatInstrument) Add(context.Context, float64, ...metric.AddOption) {
 	i.count++
 }
+
 func (i *testCountingFloatInstrument) Record(context.Context, float64, ...metric.RecordOption) {
 	i.count++
 }
@@ -190,9 +192,11 @@ type testCountingIntInstrument struct {
 func (i *testCountingIntInstrument) observe() {
 	i.count++
 }
+
 func (i *testCountingIntInstrument) Add(context.Context, int64, ...metric.AddOption) {
 	i.count++
 }
+
 func (i *testCountingIntInstrument) Record(context.Context, int64, ...metric.RecordOption) {
 	i.count++
 }
diff --git a/internal/global/state_test.go b/internal/global/state_test.go
index 93bf6b8aae2..5a049edfeed 100644
--- a/internal/global/state_test.go
+++ b/internal/global/state_test.go
@@ -20,9 +20,10 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	"go.opentelemetry.io/otel/metric"
-	"go.opentelemetry.io/otel/metric/noop"
+	metricnoop "go.opentelemetry.io/otel/metric/noop"
 	"go.opentelemetry.io/otel/propagation"
 	"go.opentelemetry.io/otel/trace"
+	tracenoop "go.opentelemetry.io/otel/trace/noop"
 )
 
 type nonComparableTracerProvider struct {
@@ -55,7 +56,7 @@ func TestSetTracerProvider(t *testing.T) {
 	t.Run("First Set() should replace the delegate", func(t *testing.T) {
 		ResetForTest(t)
 
-		SetTracerProvider(trace.NewNoopTracerProvider())
+		SetTracerProvider(tracenoop.NewTracerProvider())
 
 		_, ok := TracerProvider().(*tracerProvider)
 		if ok {
@@ -67,7 +68,7 @@ func TestSetTracerProvider(t *testing.T) {
 		ResetForTest(t)
 
 		tp := TracerProvider()
-		SetTracerProvider(trace.NewNoopTracerProvider())
+		SetTracerProvider(tracenoop.NewTracerProvider())
 
 		ntp := tp.(*tracerProvider)
 
@@ -153,7 +154,7 @@ func TestSetMeterProvider(t *testing.T) {
 	t.Run("First Set() should replace the delegate", func(t *testing.T) {
 		ResetForTest(t)
 
-		SetMeterProvider(noop.NewMeterProvider())
+		SetMeterProvider(metricnoop.NewMeterProvider())
 
 		_, ok := MeterProvider().(*meterProvider)
 		if ok {
@@ -166,7 +167,7 @@ func TestSetMeterProvider(t *testing.T) {
 
 		mp := MeterProvider()
 
-		SetMeterProvider(noop.NewMeterProvider())
+		SetMeterProvider(metricnoop.NewMeterProvider())
 
 		dmp := mp.(*meterProvider)
 
diff --git a/internal/global/trace.go b/internal/global/trace.go
index 5f008d0982b..3f61ec12a34 100644
--- a/internal/global/trace.go
+++ b/internal/global/trace.go
@@ -39,6 +39,7 @@ import (
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/codes"
 	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
 )
 
 // tracerProvider is a placeholder for a configured SDK TracerProvider.
@@ -46,6 +47,8 @@ import (
 // All TracerProvider functionality is forwarded to a delegate once
 // configured.
 type tracerProvider struct {
+	embedded.TracerProvider
+
 	mtx      sync.Mutex
 	tracers  map[il]*tracer
 	delegate trace.TracerProvider
@@ -119,6 +122,8 @@ type il struct {
 // All Tracer functionality is forwarded to a delegate once configured.
 // Otherwise, all functionality is forwarded to a NoopTracer.
 type tracer struct {
+	embedded.Tracer
+
 	name     string
 	opts     []trace.TracerOption
 	provider *tracerProvider
@@ -156,6 +161,8 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart
 // SpanContext. It performs no operations other than to return the wrapped
 // SpanContext.
 type nonRecordingSpan struct {
+	embedded.Span
+
 	sc     trace.SpanContext
 	tracer *tracer
 }
diff --git a/internal/global/trace_test.go b/internal/global/trace_test.go
index f8f9149d9e0..d9807493854 100644
--- a/internal/global/trace_test.go
+++ b/internal/global/trace_test.go
@@ -23,9 +23,13 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+	"go.opentelemetry.io/otel/trace/noop"
 )
 
 type fnTracerProvider struct {
+	embedded.TracerProvider
+
 	tracer func(string, ...trace.TracerOption) trace.Tracer
 }
 
@@ -34,6 +38,8 @@ func (fn fnTracerProvider) Tracer(instrumentationName string, opts ...trace.Trac
 }
 
 type fnTracer struct {
+	embedded.Tracer
+
 	start func(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span)
 }
 
@@ -72,7 +78,7 @@ func TestTraceProviderDelegation(t *testing.T) {
 							assert.Equal(t, want, spanName)
 						}
 					}
-					return trace.NewNoopTracerProvider().Tracer(name).Start(ctx, spanName)
+					return noop.NewTracerProvider().Tracer(name).Start(ctx, spanName)
 				},
 			}
 		},
@@ -107,7 +113,7 @@ func TestTraceProviderDelegates(t *testing.T) {
 		tracer: func(name string, opts ...trace.TracerOption) trace.Tracer {
 			called = true
 			assert.Equal(t, "abc", name)
-			return trace.NewNoopTracerProvider().Tracer("")
+			return noop.NewTracerProvider().Tracer("")
 		},
 	})
 
@@ -148,7 +154,7 @@ func TestTraceProviderDelegatesConcurrentSafe(t *testing.T) {
 				// Signal the goroutine to finish.
 				close(quit)
 			}
-			return trace.NewNoopTracerProvider().Tracer("")
+			return noop.NewTracerProvider().Tracer("")
 		},
 	})
 
@@ -195,7 +201,7 @@ func TestTracerDelegatesConcurrentSafe(t *testing.T) {
 						// Signal the goroutine to finish.
 						close(quit)
 					}
-					return trace.NewNoopTracerProvider().Tracer("").Start(ctx, spanName)
+					return noop.NewTracerProvider().Tracer("").Start(ctx, spanName)
 				},
 			}
 		},
@@ -218,7 +224,7 @@ func TestTraceProviderDelegatesSameInstance(t *testing.T) {
 
 	SetTracerProvider(fnTracerProvider{
 		tracer: func(name string, opts ...trace.TracerOption) trace.Tracer {
-			return trace.NewNoopTracerProvider().Tracer("")
+			return noop.NewTracerProvider().Tracer("")
 		},
 	})
 
diff --git a/internal/internaltest/harness.go b/internal/internaltest/harness.go
index e84eed9e719..ce8e2035f38 100644
--- a/internal/internaltest/harness.go
+++ b/internal/internaltest/harness.go
@@ -263,7 +263,7 @@ func (h *Harness) TestTracer(subjectFactory func() trace.Tracer) {
 }
 
 func (h *Harness) testSpan(tracerFactory func() trace.Tracer) {
-	var methods = map[string]func(span trace.Span){
+	methods := map[string]func(span trace.Span){
 		"#End": func(span trace.Span) {
 			span.End()
 		},
@@ -283,7 +283,7 @@ func (h *Harness) testSpan(tracerFactory func() trace.Tracer) {
 			span.SetAttributes(attribute.String("key1", "value"), attribute.Int("key2", 123))
 		},
 	}
-	var mechanisms = map[string]func() trace.Span{
+	mechanisms := map[string]func() trace.Span{
 		"Span created via Tracer#Start": func() trace.Span {
 			tracer := tracerFactory()
 			_, subject := tracer.Start(context.Background(), "test")
diff --git a/internal/internaltest/text_map_carrier_test.go b/internal/internaltest/text_map_carrier_test.go
index faf713cc2d0..086c8af26ea 100644
--- a/internal/internaltest/text_map_carrier_test.go
+++ b/internal/internaltest/text_map_carrier_test.go
@@ -22,9 +22,7 @@ import (
 	"testing"
 )
 
-var (
-	key, value = "test", "true"
-)
+var key, value = "test", "true"
 
 func TestTextMapCarrierKeys(t *testing.T) {
 	tmc := NewTextMapCarrier(map[string]string{key: value})
diff --git a/internal/matchers/expectation.go b/internal/matchers/expectation.go
index 9cf408258b0..f54f63afbb3 100644
--- a/internal/matchers/expectation.go
+++ b/internal/matchers/expectation.go
@@ -27,9 +27,7 @@ import (
 	"time"
 )
 
-var (
-	stackTracePruneRE = regexp.MustCompile(`runtime\/debug|testing|internal\/matchers`)
-)
+var stackTracePruneRE = regexp.MustCompile(`runtime\/debug|testing|internal\/matchers`)
 
 type Expectation struct {
 	t      *testing.T
diff --git a/internal/shared/internaltest/harness.go.tmpl b/internal/shared/internaltest/harness.go.tmpl
index da3e7f18615..7223b9d2196 100644
--- a/internal/shared/internaltest/harness.go.tmpl
+++ b/internal/shared/internaltest/harness.go.tmpl
@@ -263,7 +263,7 @@ func (h *Harness) TestTracer(subjectFactory func() trace.Tracer) {
 }
 
 func (h *Harness) testSpan(tracerFactory func() trace.Tracer) {
-	var methods = map[string]func(span trace.Span){
+	methods := map[string]func(span trace.Span){
 		"#End": func(span trace.Span) {
 			span.End()
 		},
@@ -283,7 +283,7 @@ func (h *Harness) testSpan(tracerFactory func() trace.Tracer) {
 			span.SetAttributes(attribute.String("key1", "value"), attribute.Int("key2", 123))
 		},
 	}
-	var mechanisms = map[string]func() trace.Span{
+	mechanisms := map[string]func() trace.Span{
 		"Span created via Tracer#Start": func() trace.Span {
 			tracer := tracerFactory()
 			_, subject := tracer.Start(context.Background(), "test")
diff --git a/internal/shared/internaltest/text_map_carrier_test.go.tmpl b/internal/shared/internaltest/text_map_carrier_test.go.tmpl
index faf713cc2d0..086c8af26ea 100644
--- a/internal/shared/internaltest/text_map_carrier_test.go.tmpl
+++ b/internal/shared/internaltest/text_map_carrier_test.go.tmpl
@@ -22,9 +22,7 @@ import (
 	"testing"
 )
 
-var (
-	key, value = "test", "true"
-)
+var key, value = "test", "true"
 
 func TestTextMapCarrierKeys(t *testing.T) {
 	tmc := NewTextMapCarrier(map[string]string{key: value})
diff --git a/internal/shared/matchers/expectation.go.tmpl b/internal/shared/matchers/expectation.go.tmpl
index bdde84ea78a..4002fec51f1 100644
--- a/internal/shared/matchers/expectation.go.tmpl
+++ b/internal/shared/matchers/expectation.go.tmpl
@@ -27,9 +27,7 @@ import (
 	"time"
 )
 
-var (
-	stackTracePruneRE = regexp.MustCompile(`runtime\/debug|testing|internal\/matchers`)
-)
+var stackTracePruneRE = regexp.MustCompile(`runtime\/debug|testing|internal\/matchers`)
 
 type Expectation struct {
 	t      *testing.T
diff --git a/internal/shared/otlp/envconfig/envconfig.go.tmpl b/internal/shared/otlp/envconfig/envconfig.go.tmpl
index 480f5f3cfd0..b516e9ca2c3 100644
--- a/internal/shared/otlp/envconfig/envconfig.go.tmpl
+++ b/internal/shared/otlp/envconfig/envconfig.go.tmpl
@@ -174,13 +174,13 @@ func stringToHeader(value string) map[string]string {
 			global.Error(errors.New("missing '="), "parse headers", "input", header)
 			continue
 		}
-		name, err := url.QueryUnescape(n)
+		name, err := url.PathUnescape(n)
 		if err != nil {
 			global.Error(err, "escape header key", "key", n)
 			continue
 		}
 		trimmedName := strings.TrimSpace(name)
-		value, err := url.QueryUnescape(v)
+		value, err := url.PathUnescape(v)
 		if err != nil {
 			global.Error(err, "escape header value", "value", v)
 			continue
diff --git a/internal/shared/otlp/envconfig/envconfig_test.go.tmpl b/internal/shared/otlp/envconfig/envconfig_test.go.tmpl
index cec506208d5..6cbe0c7ab11 100644
--- a/internal/shared/otlp/envconfig/envconfig_test.go.tmpl
+++ b/internal/shared/otlp/envconfig/envconfig_test.go.tmpl
@@ -427,7 +427,12 @@ func TestStringToHeader(t *testing.T) {
 			want:  map[string]string{"userId": "alice"},
 		},
 		{
-			name:  "multiples headers encoded",
+			name:  "simple header conforms to RFC 3986 spec",
+			value: " userId = alice+test ",
+			want:  map[string]string{"userId": "alice+test"},
+		},
+		{
+			name:  "multiple headers encoded",
 			value: "userId=alice,serverNode=DF%3A28,isProduction=false",
 			want: map[string]string{
 				"userId":       "alice",
@@ -435,6 +440,16 @@ func TestStringToHeader(t *testing.T) {
 				"isProduction": "false",
 			},
 		},
+		{
+			name:  "multiple headers encoded per RFC 3986 spec",
+			value: "userId=alice+test,serverNode=DF%3A28,isProduction=false,namespace=localhost/test",
+			want: map[string]string{
+				"userId":       "alice+test",
+				"serverNode":   "DF:28",
+				"isProduction": "false",
+				"namespace":    "localhost/test",
+			},
+		},
 		{
 			name:  "invalid headers format",
 			value: "userId:alice",
diff --git a/internal/shared/otlp/otlpmetric/oconf/options.go.tmpl b/internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
index 9518b23e8bc..c4f9d0830f4 100644
--- a/internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
+++ b/internal/shared/otlp/otlpmetric/oconf/options.go.tmpl
@@ -30,7 +30,6 @@ import (
 	"google.golang.org/grpc/credentials/insecure"
 	"google.golang.org/grpc/encoding/gzip"
 
-	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
 	"{{ .retryImportPath }}"
 	"go.opentelemetry.io/otel/sdk/metric"
 )
@@ -122,7 +121,6 @@ func cleanPath(urlPath string, defaultPath string) string {
 // NewGRPCConfig returns a new Config with all settings applied from opts and
 // any unset setting using the default gRPC config values.
 func NewGRPCConfig(opts ...GRPCOption) Config {
-	userAgent := "OTel OTLP Exporter Go/" + otlpmetric.Version()
 	cfg := Config{
 		Metrics: SignalConfig{
 			Endpoint:    fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
@@ -134,7 +132,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
 			AggregationSelector: metric.DefaultAggregationSelector,
 		},
 		RetryConfig: retry.DefaultConfig,
-		DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
 	}
 	cfg = ApplyGRPCEnvConfigs(cfg)
 	for _, opt := range opts {
@@ -158,9 +155,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
 	if cfg.Metrics.Compression == GzipCompression {
 		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
 	}
-	if len(cfg.DialOptions) != 0 {
-		cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
-	}
 	if cfg.ReconnectionPeriod != 0 {
 		p := grpc.ConnectParams{
 			Backoff:           backoff.DefaultConfig,
diff --git a/internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl b/internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl
index 16ddfdb7b53..7c52efc51d9 100644
--- a/internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl
+++ b/internal/shared/otlp/otlpmetric/oconf/options_test.go.tmpl
@@ -203,7 +203,7 @@ func TestConfigs(t *testing.T) {
 			},
 			asserts: func(t *testing.T, c *Config, grpcOption bool) {
 				if grpcOption {
-					//TODO: make sure gRPC's credentials actually works
+					// TODO: make sure gRPC's credentials actually works
 					assert.NotNil(t, c.Metrics.GRPCCredentials)
 				} else {
 					// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
diff --git a/internal/shared/otlp/otlpmetric/otest/client.go.tmpl b/internal/shared/otlp/otlpmetric/otest/client.go.tmpl
index 6c8be4982e5..37f25c9b468 100644
--- a/internal/shared/otlp/otlpmetric/otest/client.go.tmpl
+++ b/internal/shared/otlp/otlpmetric/otest/client.go.tmpl
@@ -38,7 +38,7 @@ import (
 
 var (
 	// Sat Jan 01 2000 00:00:00 GMT+0000.
-	start = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0))
+	start = time.Date(2000, time.January, 0o1, 0, 0, 0, 0, time.FixedZone("GMT", 0))
 	end   = start.Add(30 * time.Second)
 
 	kvAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{
diff --git a/internal/shared/otlp/otlpmetric/otest/collector.go.tmpl b/internal/shared/otlp/otlpmetric/otest/collector.go.tmpl
index 31fc32224b9..fba237e68fc 100644
--- a/internal/shared/otlp/otlpmetric/otest/collector.go.tmpl
+++ b/internal/shared/otlp/otlpmetric/otest/collector.go.tmpl
@@ -195,6 +195,8 @@ func (e *HTTPResponseError) Unwrap() error { return e.Err }
 
 // HTTPCollector is an OTLP HTTP server that collects all requests it receives.
 type HTTPCollector struct {
+	plainTextResponse bool
+
 	headersMu sync.Mutex
 	headers   http.Header
 	storage   *Storage
@@ -217,7 +219,7 @@ type HTTPCollector struct {
 // If errCh is not nil, the collector will respond to HTTP requests with errors
 // sent on that channel. This means that if errCh is not nil Export calls will
 // block until an error is received.
-func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult) (*HTTPCollector, error) {
+func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult, opts ...func(*HTTPCollector)) (*HTTPCollector, error) {
 	u, err := url.Parse(endpoint)
 	if err != nil {
 		return nil, err
@@ -234,6 +236,9 @@ func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult) (*HTTPColle
 		storage:  NewStorage(),
 		resultCh: resultCh,
 	}
+	for _, opt := range opts {
+		opt(c)
+	}
 
 	c.listener, err = net.Listen("tcp", u.Host)
 	if err != nil {
@@ -242,7 +247,11 @@ func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult) (*HTTPColle
 
 	mux := http.NewServeMux()
 	mux.Handle(u.Path, http.HandlerFunc(c.handler))
-	c.srv = &http.Server{Handler: mux}
+	c.srv = &http.Server{
+		Handler:      mux,
+		ReadTimeout:  10 * time.Second,
+		WriteTimeout: 10 * time.Second,
+	}
 	if u.Scheme == "https" {
 		cert, err := weakCertificate()
 		if err != nil {
@@ -258,6 +267,14 @@ func NewHTTPCollector(endpoint string, resultCh <-chan ExportResult) (*HTTPColle
 	return c, nil
 }
 
+// WithHTTPCollectorRespondingPlainText makes the HTTPCollector return
+// a plaintext, instead of protobuf, response.
+func WithHTTPCollectorRespondingPlainText() func(*HTTPCollector) {
+	return func(s *HTTPCollector) {
+		s.plainTextResponse = true
+	}
+}
+
 // Shutdown shuts down the HTTP server closing all open connections and
 // listeners.
 func (c *HTTPCollector) Shutdown(ctx context.Context) error {
@@ -378,6 +395,13 @@ func (c *HTTPCollector) respond(w http.ResponseWriter, resp ExportResult) {
 		return
 	}
 
+	if c.plainTextResponse {
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		w.WriteHeader(http.StatusOK)
+		_, _ = w.Write([]byte("OK"))
+		return
+	}
+
 	w.Header().Set("Content-Type", "application/x-protobuf")
 	w.WriteHeader(http.StatusOK)
 	if resp.Response == nil {
diff --git a/internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl b/internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl
index b94c48dae8d..676e5785633 100644
--- a/internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl
+++ b/internal/shared/otlp/otlpmetric/transform/metricdata_test.go.tmpl
@@ -40,7 +40,7 @@ type unknownAggT struct {
 
 var (
 	// Sat Jan 01 2000 00:00:00 GMT+0000.
-	start = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0))
+	start = time.Date(2000, time.January, 0o1, 0, 0, 0, 0, time.FixedZone("GMT", 0))
 	end   = start.Add(30 * time.Second)
 
 	alice = attribute.NewSet(attribute.String("user", "alice"))
diff --git a/internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl b/internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
index 99d9e4804e9..9270e506a9c 100644
--- a/internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
+++ b/internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
@@ -141,9 +141,6 @@ func NewGRPCConfig(opts ...GRPCOption) Config {
 	if cfg.Traces.Compression == GzipCompression {
 		cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
 	}
-	if len(cfg.DialOptions) != 0 {
-		cfg.DialOptions = append(cfg.DialOptions, cfg.DialOptions...)
-	}
 	if cfg.ReconnectionPeriod != 0 {
 		p := grpc.ConnectParams{
 			Backoff:           backoff.DefaultConfig,
diff --git a/internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl b/internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl
index fcf99f0ad42..b83891a89f6 100644
--- a/internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl
+++ b/internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl
@@ -201,7 +201,7 @@ func TestConfigs(t *testing.T) {
 			},
 			asserts: func(t *testing.T, c *Config, grpcOption bool) {
 				if grpcOption {
-					//TODO: make sure gRPC's credentials actually works
+					// TODO: make sure gRPC's credentials actually works
 					assert.NotNil(t, c.Traces.GRPCCredentials)
 				} else {
 					// nolint:staticcheck // ignoring tlsCert.RootCAs.Subjects is deprecated ERR because cert does not come from SystemCertPool.
diff --git a/internal/tools/go.mod b/internal/tools/go.mod
index 395990cc8d2..b4f6570cb96 100644
--- a/internal/tools/go.mod
+++ b/internal/tools/go.mod
@@ -1,39 +1,42 @@
 module go.opentelemetry.io/otel/internal/tools
 
-go 1.19
+go 1.20
 
 require (
 	github.com/client9/misspell v0.3.4
 	github.com/gogo/protobuf v1.3.2
-	github.com/golangci/golangci-lint v1.54.1
-	github.com/itchyny/gojq v0.12.13
-	github.com/jcchavezs/porto v0.4.0
+	github.com/golangci/golangci-lint v1.55.2
+	github.com/itchyny/gojq v0.12.14
+	github.com/jcchavezs/porto v0.6.0
 	github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad
-	go.opentelemetry.io/build-tools/crosslink v0.11.0
-	go.opentelemetry.io/build-tools/dbotconf v0.11.0
-	go.opentelemetry.io/build-tools/gotmpl v0.11.0
-	go.opentelemetry.io/build-tools/multimod v0.11.0
-	go.opentelemetry.io/build-tools/semconvgen v0.11.0
+	go.opentelemetry.io/build-tools/crosslink v0.12.0
+	go.opentelemetry.io/build-tools/dbotconf v0.12.0
+	go.opentelemetry.io/build-tools/gotmpl v0.12.0
+	go.opentelemetry.io/build-tools/multimod v0.12.0
+	go.opentelemetry.io/build-tools/semconvgen v0.12.0
 	golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea
-	golang.org/x/tools v0.12.0
+	golang.org/x/tools v0.16.0
+	golang.org/x/vuln v1.0.1
 )
 
 require (
 	4d63.com/gocheckcompilerdirectives v1.2.1 // indirect
 	4d63.com/gochecknoglobals v0.2.1 // indirect
 	dario.cat/mergo v1.0.0 // indirect
-	github.com/4meepo/tagalign v1.3.2 // indirect
-	github.com/Abirdcfly/dupword v0.0.12 // indirect
-	github.com/Antonboom/errname v0.1.10 // indirect
-	github.com/Antonboom/nilnil v0.1.5 // indirect
+	github.com/4meepo/tagalign v1.3.3 // indirect
+	github.com/Abirdcfly/dupword v0.0.13 // indirect
+	github.com/Antonboom/errname v0.1.12 // indirect
+	github.com/Antonboom/nilnil v0.1.7 // indirect
+	github.com/Antonboom/testifylint v0.2.3 // indirect
 	github.com/BurntSushi/toml v1.3.2 // indirect
 	github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
 	github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 // indirect
 	github.com/Masterminds/semver v1.5.0 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/OpenPeeDeeP/depguard/v2 v2.1.0 // indirect
-	github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 // indirect
+	github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect
 	github.com/acomagu/bufpipe v1.0.4 // indirect
+	github.com/alecthomas/go-check-sumtype v0.1.3 // indirect
 	github.com/alexkohler/nakedret/v2 v2.0.2 // indirect
 	github.com/alexkohler/prealloc v1.0.0 // indirect
 	github.com/alingse/asasalint v0.0.11 // indirect
@@ -43,16 +46,19 @@ require (
 	github.com/bkielbasa/cyclop v1.2.1 // indirect
 	github.com/blizzy78/varnamelen v0.8.0 // indirect
 	github.com/bombsimon/wsl/v3 v3.4.0 // indirect
-	github.com/breml/bidichk v0.2.4 // indirect
-	github.com/breml/errchkjson v0.3.1 // indirect
-	github.com/butuzov/ireturn v0.2.0 // indirect
+	github.com/breml/bidichk v0.2.7 // indirect
+	github.com/breml/errchkjson v0.3.6 // indirect
+	github.com/butuzov/ireturn v0.2.2 // indirect
 	github.com/butuzov/mirror v1.1.0 // indirect
+	github.com/catenacyber/perfsprint v0.2.0 // indirect
+	github.com/ccojocar/zxcvbn-go v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/charithe/durationcheck v0.0.10 // indirect
-	github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 // indirect
+	github.com/chavacava/garif v0.1.0 // indirect
 	github.com/cloudflare/circl v1.3.3 // indirect
 	github.com/curioswitch/go-reassign v0.2.0 // indirect
-	github.com/daixiang0/gci v0.11.0 // indirect
+	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
+	github.com/daixiang0/gci v0.11.2 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/denis-tingaikin/go-header v0.4.3 // indirect
 	github.com/emirpasic/gods v1.18.1 // indirect
@@ -63,10 +69,11 @@ require (
 	github.com/firefart/nonamedreturns v1.0.4 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/fzipp/gocyclo v0.6.0 // indirect
+	github.com/ghostiam/protogetter v0.2.3 // indirect
 	github.com/go-critic/go-critic v0.9.0 // indirect
 	github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
-	github.com/go-git/go-billy/v5 v5.4.1 // indirect
-	github.com/go-git/go-git/v5 v5.8.1 // indirect
+	github.com/go-git/go-billy/v5 v5.5.0 // indirect
+	github.com/go-git/go-git/v5 v5.9.0 // indirect
 	github.com/go-toolsmith/astcast v1.1.0 // indirect
 	github.com/go-toolsmith/astcopy v1.1.0 // indirect
 	github.com/go-toolsmith/astequal v1.1.0 // indirect
@@ -82,13 +89,13 @@ require (
 	github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
 	github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
 	github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect
-	github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect
+	github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e // indirect
 	github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect
 	github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect
 	github.com/golangci/misspell v0.4.1 // indirect
-	github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect
+	github.com/golangci/revgrep v0.5.2 // indirect
 	github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
-	github.com/google/go-cmp v0.5.9 // indirect
+	github.com/google/go-cmp v0.6.0 // indirect
 	github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 // indirect
 	github.com/gostaticanalysis/analysisutil v0.7.1 // indirect
 	github.com/gostaticanalysis/comment v1.4.2 // indirect
@@ -102,7 +109,7 @@ require (
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/itchyny/timefmt-go v0.1.5 // indirect
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
-	github.com/jgautheron/goconst v1.5.1 // indirect
+	github.com/jgautheron/goconst v1.6.0 // indirect
 	github.com/jingyugao/rowserrcheck v1.1.1 // indirect
 	github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect
 	github.com/julz/importas v0.1.0 // indirect
@@ -117,44 +124,44 @@ require (
 	github.com/ldez/tagliatelle v0.5.0 // indirect
 	github.com/leonklingele/grouper v1.1.1 // indirect
 	github.com/lufeee/execinquery v1.2.1 // indirect
+	github.com/macabu/inamedparam v0.1.2 // indirect
 	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/maratori/testableexamples v1.0.0 // indirect
 	github.com/maratori/testpackage v1.1.1 // indirect
 	github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.19 // indirect
-	github.com/mattn/go-runewidth v0.0.14 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/mattn/go-runewidth v0.0.15 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mbilski/exhaustivestruct v1.2.0 // indirect
-	github.com/mgechev/revive v1.3.2 // indirect
+	github.com/mgechev/revive v1.3.4 // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moricho/tparallel v0.3.1 // indirect
 	github.com/nakabonne/nestif v0.3.1 // indirect
-	github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
 	github.com/nishanths/exhaustive v0.11.0 // indirect
 	github.com/nishanths/predeclared v0.2.2 // indirect
-	github.com/nunnatsa/ginkgolinter v0.13.3 // indirect
+	github.com/nunnatsa/ginkgolinter v0.14.1 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
 	github.com/pelletier/go-toml/v2 v2.0.8 // indirect
 	github.com/pjbgf/sha1cd v0.3.0 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/polyfloyd/go-errorlint v1.4.3 // indirect
-	github.com/prometheus/client_golang v1.16.0 // indirect
-	github.com/prometheus/client_model v0.4.0 // indirect
-	github.com/prometheus/common v0.42.0 // indirect
-	github.com/prometheus/procfs v0.10.1 // indirect
+	github.com/polyfloyd/go-errorlint v1.4.5 // indirect
+	github.com/prometheus/client_golang v1.17.0 // indirect
+	github.com/prometheus/client_model v0.5.0 // indirect
+	github.com/prometheus/common v0.44.0 // indirect
+	github.com/prometheus/procfs v0.11.1 // indirect
 	github.com/quasilyte/go-ruleguard v0.4.0 // indirect
 	github.com/quasilyte/gogrep v0.5.0 // indirect
 	github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect
 	github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
 	github.com/rivo/uniseg v0.4.4 // indirect
 	github.com/ryancurrah/gomodguard v1.3.0 // indirect
-	github.com/ryanrolds/sqlclosecheck v0.4.0 // indirect
+	github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect
 	github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect
 	github.com/sashamelentyev/interfacebloat v1.1.0 // indirect
-	github.com/sashamelentyev/usestdlibvars v1.23.0 // indirect
-	github.com/securego/gosec/v2 v2.16.0 // indirect
+	github.com/sashamelentyev/usestdlibvars v1.24.0 // indirect
+	github.com/securego/gosec/v2 v2.18.2 // indirect
 	github.com/sergi/go-diff v1.2.0 // indirect
 	github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
@@ -177,40 +184,40 @@ require (
 	github.com/subosito/gotenv v1.4.2 // indirect
 	github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect
 	github.com/tdakkota/asciicheck v0.2.0 // indirect
-	github.com/tetafro/godot v1.4.11 // indirect
+	github.com/tetafro/godot v1.4.15 // indirect
 	github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect
 	github.com/timonwong/loggercheck v0.9.4 // indirect
 	github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect
 	github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
 	github.com/ultraware/funlen v0.1.0 // indirect
 	github.com/ultraware/whitespace v0.0.5 // indirect
-	github.com/uudashr/gocognit v1.0.7 // indirect
+	github.com/uudashr/gocognit v1.1.2 // indirect
 	github.com/xanzy/ssh-agent v0.3.3 // indirect
-	github.com/xen0n/gosmopolitan v1.2.1 // indirect
+	github.com/xen0n/gosmopolitan v1.2.2 // indirect
 	github.com/yagipy/maintidx v1.0.0 // indirect
 	github.com/yeya24/promlinter v0.2.0 // indirect
 	github.com/ykadowak/zerologlint v0.1.3 // indirect
-	gitlab.com/bosi/decorder v0.4.0 // indirect
-	go.opentelemetry.io/build-tools v0.11.0 // indirect
-	go.tmz.dev/musttag v0.7.1 // indirect
-	go.uber.org/atomic v1.10.0 // indirect
+	gitlab.com/bosi/decorder v0.4.1 // indirect
+	go-simpler.org/sloglint v0.1.2 // indirect
+	go.opentelemetry.io/build-tools v0.12.0 // indirect
+	go.tmz.dev/musttag v0.7.2 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	go.uber.org/zap v1.24.0 // indirect
-	golang.org/x/crypto v0.12.0 // indirect
+	go.uber.org/zap v1.26.0 // indirect
+	golang.org/x/crypto v0.16.0 // indirect
 	golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 // indirect
-	golang.org/x/mod v0.12.0 // indirect
-	golang.org/x/net v0.14.0 // indirect
-	golang.org/x/sync v0.3.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
-	golang.org/x/text v0.12.0 // indirect
+	golang.org/x/mod v0.14.0 // indirect
+	golang.org/x/net v0.19.0 // indirect
+	golang.org/x/sync v0.5.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
+	golang.org/x/text v0.14.0 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/warnings.v0 v0.1.2 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	honnef.co/go/tools v0.4.3 // indirect
+	honnef.co/go/tools v0.4.6 // indirect
 	mvdan.cc/gofumpt v0.5.0 // indirect
 	mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect
 	mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect
-	mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d // indirect
+	mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8 // indirect
 )
diff --git a/internal/tools/go.sum b/internal/tools/go.sum
index 3f509773011..d04b80dbd2f 100644
--- a/internal/tools/go.sum
+++ b/internal/tools/go.sum
@@ -42,14 +42,16 @@ cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3f
 dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
 dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/4meepo/tagalign v1.3.2 h1:1idD3yxlRGV18VjqtDbqYvQ5pXqQS0wO2dn6M3XstvI=
-github.com/4meepo/tagalign v1.3.2/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE=
-github.com/Abirdcfly/dupword v0.0.12 h1:56NnOyrXzChj07BDFjeRA+IUzSz01jmzEq+G4kEgFhc=
-github.com/Abirdcfly/dupword v0.0.12/go.mod h1:+us/TGct/nI9Ndcbcp3rgNcQzctTj68pq7TcgNpLfdI=
-github.com/Antonboom/errname v0.1.10 h1:RZ7cYo/GuZqjr1nuJLNe8ZH+a+Jd9DaZzttWzak9Bls=
-github.com/Antonboom/errname v0.1.10/go.mod h1:xLeiCIrvVNpUtsN0wxAh05bNIZpqE22/qDMnTBTttiA=
-github.com/Antonboom/nilnil v0.1.5 h1:X2JAdEVcbPaOom2TUa1FxZ3uyuUlex0XMLGYMemu6l0=
-github.com/Antonboom/nilnil v0.1.5/go.mod h1:I24toVuBKhfP5teihGWctrRiPbRKHwZIFOvc6v3HZXk=
+github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw=
+github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE=
+github.com/Abirdcfly/dupword v0.0.13 h1:SMS17YXypwP000fA7Lr+kfyBQyW14tTT+nRv9ASwUUo=
+github.com/Abirdcfly/dupword v0.0.13/go.mod h1:Ut6Ue2KgF/kCOawpW4LnExT+xZLQviJPE4klBPMK/5Y=
+github.com/Antonboom/errname v0.1.12 h1:oh9ak2zUtsLp5oaEd/erjB4GPu9w19NyoIskZClDcQY=
+github.com/Antonboom/errname v0.1.12/go.mod h1:bK7todrzvlaZoQagP1orKzWXv59X/x0W0Io2XT1Ssro=
+github.com/Antonboom/nilnil v0.1.7 h1:ofgL+BA7vlA1K2wNQOsHzLJ2Pw5B5DpWRLdDAVvvTow=
+github.com/Antonboom/nilnil v0.1.7/go.mod h1:TP+ScQWVEq0eSIxqU8CbdT5DFWoHp0MbP+KMUO1BKYQ=
+github.com/Antonboom/testifylint v0.2.3 h1:MFq9zyL+rIVpsvLX4vDPLojgN7qODzWsrnftNX2Qh60=
+github.com/Antonboom/testifylint v0.2.3/go.mod h1:IYaXaOX9NbfAyO+Y04nfjGI8wDemC1rUyM/cYolz018=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
 github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
@@ -65,10 +67,14 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc
 github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
 github.com/OpenPeeDeeP/depguard/v2 v2.1.0 h1:aQl70G173h/GZYhWf36aE5H0KaujXfVMnn/f1kSDVYY=
 github.com/OpenPeeDeeP/depguard/v2 v2.1.0/go.mod h1:PUBgk35fX4i7JDmwzlJwJ+GMe6NfO1723wmJMgPThNQ=
-github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95 h1:KLq8BE0KwCL+mmXnjLWEAOYO+2l2AE4YMmqG1ZpZHBs=
-github.com/ProtonMail/go-crypto v0.0.0-20230717121422-5aa5874ade95/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
+github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg=
+github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
 github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ=
 github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
+github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk=
+github.com/alecthomas/go-check-sumtype v0.1.3 h1:M+tqMxB68hcgccRXBMVCPI4UJ+QUfdSx0xdbypKCqA8=
+github.com/alecthomas/go-check-sumtype v0.1.3/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ=
+github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -86,7 +92,6 @@ github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8ger
 github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU=
 github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s=
 github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
-github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -97,15 +102,19 @@ github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ
 github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k=
 github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU=
 github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo=
-github.com/breml/bidichk v0.2.4 h1:i3yedFWWQ7YzjdZJHnPo9d/xURinSq3OM+gyM43K4/8=
-github.com/breml/bidichk v0.2.4/go.mod h1:7Zk0kRFt1LIZxtQdl9W9JwGAcLTTkOs+tN7wuEYGJ3s=
-github.com/breml/errchkjson v0.3.1 h1:hlIeXuspTyt8Y/UmP5qy1JocGNR00KQHgfaNtRAjoxQ=
-github.com/breml/errchkjson v0.3.1/go.mod h1:XroxrzKjdiutFyW3nWhw34VGg7kiMsDQox73yWCGI2U=
-github.com/butuzov/ireturn v0.2.0 h1:kCHi+YzC150GE98WFuZQu9yrTn6GEydO2AuPLbTgnO4=
-github.com/butuzov/ireturn v0.2.0/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
+github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY=
+github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ=
+github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA=
+github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U=
+github.com/butuzov/ireturn v0.2.2 h1:jWI36dxXwVrI+RnXDwux2IZOewpmfv930OuIRfaBUJ0=
+github.com/butuzov/ireturn v0.2.2/go.mod h1:RfGHUvvAuFFxoHKf4Z8Yxuh6OjlCw1KvR2zM1NFHeBk=
 github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI=
 github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE=
 github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
+github.com/catenacyber/perfsprint v0.2.0 h1:azOocHLscPjqXVJ7Mf14Zjlkn4uNua0+Hcg1wTR6vUo=
+github.com/catenacyber/perfsprint v0.2.0/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50=
+github.com/ccojocar/zxcvbn-go v1.0.1 h1:+sxrANSCj6CdadkcMnvde/GWU1vZiiXRbqYSCalV4/4=
+github.com/ccojocar/zxcvbn-go v1.0.1/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -113,8 +122,8 @@ github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4=
 github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ=
-github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 h1:W9o46d2kbNL06lq7UNDPV0zYLzkrde/bjIqO02eoll0=
-github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8/go.mod h1:gakxgyXaaPkxvLw1XQxNGK4I37ys9iBRzNUx/B7pUCo=
+github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc=
+github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -126,17 +135,18 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo=
 github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
-github.com/daixiang0/gci v0.11.0 h1:XeQbFKkCRxvVyn06EOuNY6LPGBLVuB/W130c8FrnX6A=
-github.com/daixiang0/gci v0.11.0/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
+github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/daixiang0/gci v0.11.2 h1:Oji+oPsp3bQ6bNNgX30NBAVT18P4uBH4sRZnlOlTj7Y=
+github.com/daixiang0/gci v0.11.2/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU=
 github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c=
-github.com/elazarl/goproxy v0.0.0-20221015165544-a0805db90819 h1:RIB4cRk+lBqKK3Oy0r2gRX4ui7tuhiZq2SuTtTCi0/0=
+github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU=
 github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
 github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -160,16 +170,18 @@ github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4
 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
 github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
 github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
+github.com/ghostiam/protogetter v0.2.3 h1:qdv2pzo3BpLqezwqfGDLZ+nHEYmc5bUpIdsMbBVwMjw=
+github.com/ghostiam/protogetter v0.2.3/go.mod h1:KmNLOsy1v04hKbvZs8EfGI1fk39AgTdRDxWNYPfXVc4=
 github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
 github.com/go-critic/go-critic v0.9.0 h1:Pmys9qvU3pSML/3GEQ2Xd9RZ/ip+aXHKILuxczKGV/U=
 github.com/go-critic/go-critic v0.9.0/go.mod h1:5P8tdXL7m/6qnyG6oRAlYLORvoXH0WDypYgAEmagT40=
 github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
 github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
-github.com/go-git/go-billy/v5 v5.4.1 h1:Uwp5tDRkPr+l/TnbHOQzp+tmJfLceOlbVucgpTz8ix4=
-github.com/go-git/go-billy/v5 v5.4.1/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg=
+github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
+github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
 github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20230305113008-0c11038e723f h1:Pz0DHeFij3XFhoBRGUDPzSJ+w2UcK5/0JvF8DRI58r8=
-github.com/go-git/go-git/v5 v5.8.1 h1:Zo79E4p7TRk0xoRgMq0RShiTHGKcKI4+DI6BfJc/Q+A=
-github.com/go-git/go-git/v5 v5.8.1/go.mod h1:FHFuoD6yGz5OSKEBK+aWN9Oah0q54Jxl0abmj6GnqAo=
+github.com/go-git/go-git/v5 v5.9.0 h1:cD9SFA7sHVRdJ7AYck1ZaAa/yeuBvGPxwXDL8cxrObY=
+github.com/go-git/go-git/v5 v5.9.0/go.mod h1:RKIqga24sWdMGZF+1Ekv9kylsDz6LzdTSI2s/OsZWE0=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -245,22 +257,23 @@ github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9
 github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
 github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo=
 github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ=
-github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY=
-github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs=
-github.com/golangci/golangci-lint v1.54.1 h1:0qMrH1gkeIBqCZaaAm5Fwq4xys9rO/lJofHfZURIFFk=
-github.com/golangci/golangci-lint v1.54.1/go.mod h1:JK47+qksV/t2mAz9YvndwT0ZLW4A1rvDljOs3g9jblo=
+github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e h1:ULcKCDV1LOZPFxGZaA6TlQbiM3J2GCPnkx/bGF6sX/g=
+github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM=
+github.com/golangci/golangci-lint v1.55.2 h1:yllEIsSJ7MtlDBwDJ9IMBkyEUz2fYE0b5B8IUgO1oP8=
+github.com/golangci/golangci-lint v1.55.2/go.mod h1:H60CZ0fuqoTwlTvnbyjhpZPWp7KmsjwV2yupIMiMXbM=
 github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
 github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
 github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
 github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
 github.com/golangci/misspell v0.4.1 h1:+y73iSicVy2PqyX7kmUefHusENlrP9YwuHZHPLGQj/g=
 github.com/golangci/misspell v0.4.1/go.mod h1:9mAN1quEo3DlpbaIKKyEvRxK1pwqR9s/Sea1bJCtlNI=
-github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ=
-github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs=
+github.com/golangci/revgrep v0.5.2 h1:EndcWoRhcnfj2NHQ+28hyuXpLMF+dQmCN+YaeeIl4FU=
+github.com/golangci/revgrep v0.5.2/go.mod h1:bjAMA+Sh/QUfTDcHzxfyHxr4xKvllVr/0sCv2e7jJHA=
 github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=
 github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmdtest v0.4.1-0.20220921163831-55ab3332a786 h1:rcv+Ippz6RAtvaGgKxc+8FQIpxHgsF+HBzPyYL2cyVU=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -273,8 +286,8 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -290,6 +303,7 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
+github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
@@ -325,16 +339,16 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/itchyny/gojq v0.12.13 h1:IxyYlHYIlspQHHTE0f3cJF0NKDMfajxViuhBLnHd/QU=
-github.com/itchyny/gojq v0.12.13/go.mod h1:JzwzAqenfhrPUuwbmEz3nu3JQmFLlQTQMUcOdnu/Sf4=
+github.com/itchyny/gojq v0.12.14 h1:6k8vVtsrhQSYgSGg827AD+PVVaB1NLXEdX+dda2oZCc=
+github.com/itchyny/gojq v0.12.14/go.mod h1:y1G7oO7XkcR1LPZO59KyoCRy08T3j9vDYRV0GgYSS+s=
 github.com/itchyny/timefmt-go v0.1.5 h1:G0INE2la8S6ru/ZI5JecgyzbbJNs5lG1RcBqa7Jm6GE=
 github.com/itchyny/timefmt-go v0.1.5/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
 github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
-github.com/jcchavezs/porto v0.4.0 h1:Zj7RligrxmDdKGo6fBO2xYAHxEgrVBfs1YAja20WbV4=
-github.com/jcchavezs/porto v0.4.0/go.mod h1:fESH0gzDHiutHRdX2hv27ojnOVFco37hg1W6E9EZF4A=
-github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM=
-github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
+github.com/jcchavezs/porto v0.6.0 h1:AgQLGwsXaxDkPj4Y+paFkVGLAR4n/1RRF0xV5UKinwg=
+github.com/jcchavezs/porto v0.6.0/go.mod h1:fESH0gzDHiutHRdX2hv27ojnOVFco37hg1W6E9EZF4A=
+github.com/jgautheron/goconst v1.6.0 h1:gbMLWKRMkzAc6kYsQL6/TxaoBUg3Jm9LSF/Ih1ADWGA=
+github.com/jgautheron/goconst v1.6.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4=
 github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs=
 github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c=
 github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48=
@@ -364,12 +378,10 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs=
 github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I=
 github.com/kunwardeep/paralleltest v1.0.8 h1:Ul2KsqtzFxTlSU7IP0JusWlLiNqQaloB9vguyjbE558=
@@ -384,6 +396,8 @@ github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt
 github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY=
 github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM=
 github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM=
+github.com/macabu/inamedparam v0.1.2 h1:RR5cnayM6Q7cDhQol32DE2BGAPGMnffJ31LFE+UklaU=
+github.com/macabu/inamedparam v0.1.2/go.mod h1:Xg25QvY7IBRl1KLPV9Rbml8JOMZtF/iAkNkmV7eQgjw=
 github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
 github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
 github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI=
@@ -398,18 +412,18 @@ github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwM
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
-github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
-github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
+github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
 github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
 github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo=
 github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
-github.com/mgechev/revive v1.3.2 h1:Wb8NQKBaALBJ3xrrj4zpwJwqwNA6nDpyJSEQWcCka6U=
-github.com/mgechev/revive v1.3.2/go.mod h1:UCLtc7o5vg5aXCwdUTU1kEBQ1v+YXPAkYDIDXbrs5I0=
+github.com/mgechev/revive v1.3.4 h1:k/tO3XTaWY4DEHal9tWBkkUMJYO/dLDVyMmAQxmIMDc=
+github.com/mgechev/revive v1.3.4/go.mod h1:W+pZCMu9qj8Uhfs1iJMQsEFLRozUfvwFwqVvRbSNLVw=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -425,19 +439,16 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U=
 github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE=
-github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA=
-github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/nishanths/exhaustive v0.11.0 h1:T3I8nUGhl/Cwu5Z2hfc92l0e04D2GEW6e0l8pzda2l0=
 github.com/nishanths/exhaustive v0.11.0/go.mod h1:RqwDsZ1xY0dNdqHho2z6X+bgzizwbLYOWnZbbl2wLB4=
 github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=
 github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
-github.com/nunnatsa/ginkgolinter v0.13.3 h1:wEvjrzSMfDdnoWkctignX9QTf4rT9f4GkQ3uVoXBmiU=
-github.com/nunnatsa/ginkgolinter v0.13.3/go.mod h1:aTKXo8WddENYxNEFT+4ZxEgWXqlD9uMD3w9Bfw/ABEc=
+github.com/nunnatsa/ginkgolinter v0.14.1 h1:khx0CqR5U4ghsscjJ+lZVthp3zjIFytRXPTaQ/TMiyA=
+github.com/nunnatsa/ginkgolinter v0.14.1/go.mod h1:nY0pafUSst7v7F637e7fymaMlQqI9c0Wka2fGsDkzWg=
 github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
 github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
-github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
-github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
+github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
+github.com/onsi/gomega v1.28.1 h1:MijcGUbfYuznzK/5R4CPNoUP/9Xvuo20sXfEm6XxoTA=
 github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw=
 github.com/otiai10/copy v1.12.0 h1:cLMgSQnXBs1eehF0Wy/FAGsgDTDmAqFR7rQylBb1nDY=
 github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
@@ -455,34 +466,34 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/polyfloyd/go-errorlint v1.4.3 h1:P6NALOLV8BrWhm6PsqOraUK05E5h8IZnpXYJ+CIg+0U=
-github.com/polyfloyd/go-errorlint v1.4.3/go.mod h1:VPlWPh6hB/wruVG803SuNpLuTGNjLHYlvcdSy4RhdPA=
+github.com/polyfloyd/go-errorlint v1.4.5 h1:70YWmMy4FgRHehGNOUask3HtSFSOLKgmDn7ryNe7LqI=
+github.com/polyfloyd/go-errorlint v1.4.5/go.mod h1:sIZEbFoDOCnTYYZoVkjc4hTnM459tuWA9H/EkdXwsKk=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
 github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
-github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
-github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
+github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
+github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
-github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
-github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
+github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
+github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
-github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
+github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
+github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
 github.com/quasilyte/go-ruleguard v0.4.0 h1:DyM6r+TKL+xbKB4Nm7Afd1IQh9kEUKQs2pboWGKtvQo=
 github.com/quasilyte/go-ruleguard v0.4.0/go.mod h1:Eu76Z/R8IXtViWUIHkE3p8gdH3/PKk1eh3YGfaEof10=
 github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo=
@@ -495,20 +506,20 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ
 github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
 github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw=
 github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50=
-github.com/ryanrolds/sqlclosecheck v0.4.0 h1:i8SX60Rppc1wRuyQjMciLqIzV3xnoHB7/tXbr6RGYNI=
-github.com/ryanrolds/sqlclosecheck v0.4.0/go.mod h1:TBRRjzL31JONc9i4XMinicuo+s+E8yKZ5FN8X3G6CKQ=
+github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU=
+github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ=
 github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc=
 github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
 github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw=
 github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ=
-github.com/sashamelentyev/usestdlibvars v1.23.0 h1:01h+/2Kd+NblNItNeux0veSL5cBF1jbEOPrEhDzGYq0=
-github.com/sashamelentyev/usestdlibvars v1.23.0/go.mod h1:YPwr/Y1LATzHI93CqoPUN/2BzGQ/6N/cl/KwgR0B/aU=
-github.com/securego/gosec/v2 v2.16.0 h1:Pi0JKoasQQ3NnoRao/ww/N/XdynIB9NRYYZT5CyOs5U=
-github.com/securego/gosec/v2 v2.16.0/go.mod h1:xvLcVZqUfo4aAQu56TNv7/Ltz6emAOQAEsrZrt7uGlI=
+github.com/sashamelentyev/usestdlibvars v1.24.0 h1:MKNzmXtGh5N0y74Z/CIaJh4GlB364l0K1RUT08WSWAc=
+github.com/sashamelentyev/usestdlibvars v1.24.0/go.mod h1:9cYkq+gYJ+a5W2RPdhfaSCnTVUC1OQP/bSiiBhq3OZE=
+github.com/securego/gosec/v2 v2.18.2 h1:DkDt3wCiOtAHf1XkiXZBhQ6m6mK/b9T/wD257R3/c+I=
+github.com/securego/gosec/v2 v2.18.2/go.mod h1:xUuqSF6i0So56Y2wwohWAmB07EdBkUN6crbLlHwbyJs=
 github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
 github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
 github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
@@ -554,7 +565,6 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -563,7 +573,6 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
@@ -577,8 +586,8 @@ github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA
 github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0=
 github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag=
 github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY=
-github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw=
-github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
+github.com/tetafro/godot v1.4.15 h1:QzdIs+XB8q+U1WmQEWKHQbKmCw06QuQM7gLx/dky2RM=
+github.com/tetafro/godot v1.4.15/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio=
 github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M=
 github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ=
 github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4=
@@ -591,14 +600,14 @@ github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81v
 github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4=
 github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI=
 github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
-github.com/uudashr/gocognit v1.0.7 h1:e9aFXgKgUJrQ5+bs61zBigmj7bFJ/5cC6HmMahVzuDo=
-github.com/uudashr/gocognit v1.0.7/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
+github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI=
+github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k=
 github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad h1:W0LEBv82YCGEtcmPA3uNZBI33/qF//HAAs3MawDjRa0=
 github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM=
 github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
 github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
-github.com/xen0n/gosmopolitan v1.2.1 h1:3pttnTuFumELBRSh+KQs1zcz4fN6Zy7aB0xlnQSn1Iw=
-github.com/xen0n/gosmopolitan v1.2.1/go.mod h1:JsHq/Brs1o050OOdmzHeOr0N7OtlnKRAGAsElF8xBQA=
+github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU=
+github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg=
 github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM=
 github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk=
 github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o=
@@ -612,36 +621,36 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-gitlab.com/bosi/decorder v0.4.0 h1:HWuxAhSxIvsITcXeP+iIRg9d1cVfvVkmlF7M68GaoDY=
-gitlab.com/bosi/decorder v0.4.0/go.mod h1:xarnteyUoJiOTEldDysquWKTVDCKo2TOIOIibSuWqOg=
-go-simpler.org/assert v0.5.0 h1:+5L/lajuQtzmbtEfh69sr5cRf2/xZzyJhFjoOz/PPqs=
+gitlab.com/bosi/decorder v0.4.1 h1:VdsdfxhstabyhZovHafFw+9eJ6eU0d2CkFNJcZz/NU4=
+gitlab.com/bosi/decorder v0.4.1/go.mod h1:jecSqWUew6Yle1pCr2eLWTensJMmsxHsBwt+PVbkAqA=
+go-simpler.org/assert v0.6.0 h1:QxSrXa4oRuo/1eHMXSBFHKvJIpWABayzKldqZyugG7E=
+go-simpler.org/sloglint v0.1.2 h1:IjdhF8NPxyn0Ckn2+fuIof7ntSnVUAqBFcQRrnG9AiM=
+go-simpler.org/sloglint v0.1.2/go.mod h1:2LL+QImPfTslD5muNPydAEYmpXIj6o/WYcqnJjLi4o4=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opentelemetry.io/build-tools v0.11.0 h1:yXTgCJM/vxWZEB8FbgVhKOAFnRlacG2Z3eoTQZ0/gYE=
-go.opentelemetry.io/build-tools v0.11.0/go.mod h1:GFpz8YD/DG5shfY1J2f3uuK88zr61U5rVRGOhKMDE9M=
-go.opentelemetry.io/build-tools/crosslink v0.11.0 h1:K0eJY/AT6SiIaoJSrQyiVquGErcJEHsx4oHkhxvpj9k=
-go.opentelemetry.io/build-tools/crosslink v0.11.0/go.mod h1:h5oxbHx+O50aO0/M7mFejZmd7cMONdsmmC+IOmgWoWw=
-go.opentelemetry.io/build-tools/dbotconf v0.11.0 h1:hG0Zyln9Vv+kwNC+ip/EUcLnd9osTZ8dOYOxe/lHZy4=
-go.opentelemetry.io/build-tools/dbotconf v0.11.0/go.mod h1:BxYX1iAki4EWzIVXeEPFM75ZWr9e9koqT7pTU5xzad4=
-go.opentelemetry.io/build-tools/gotmpl v0.11.0 h1:T2KJ7Eli7wLrp+8TXpUQ+Q+wAdZZDiyHYSvrpeER7Pc=
-go.opentelemetry.io/build-tools/gotmpl v0.11.0/go.mod h1:FzweYUfAJC1i5ATrtFI4KJggnO9QQGPdSVKWA8RHjdE=
-go.opentelemetry.io/build-tools/multimod v0.11.0 h1:QMo2Y4BlsTsWUR0LXV4gmiv5yEiX2iPLn2qAdAcCE6k=
-go.opentelemetry.io/build-tools/multimod v0.11.0/go.mod h1:EID7sjEGyk1FWzRdsV6rlWp43IIn8iHXGE5pM4TytyQ=
-go.opentelemetry.io/build-tools/semconvgen v0.11.0 h1:gQsNzy49l9JjNozybaRUl+vy0EMxYasV8w6aK+IWquc=
-go.opentelemetry.io/build-tools/semconvgen v0.11.0/go.mod h1:Zy04Bw3w3lT7mORe23V2BwjfJYpoza6Xz1XSMIrLTCg=
-go.tmz.dev/musttag v0.7.1 h1:9lFmeSFnFfPuMq4IksHGomItE6NgKMNW2Nt2FPOhCfU=
-go.tmz.dev/musttag v0.7.1/go.mod h1:oJLkpR56EsIryktZJk/B0IroSMi37YWver47fibGh5U=
-go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
-go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
+go.opentelemetry.io/build-tools v0.12.0 h1:ZqK1GuqBp9Mf1RthYO3/jjf9tPWzeHMcVDo0itFi/lI=
+go.opentelemetry.io/build-tools v0.12.0/go.mod h1:I76Qvv9cN055XJfTHw9t257EUd5Yp0EofeTMESlZuRU=
+go.opentelemetry.io/build-tools/crosslink v0.12.0 h1:GNJQURuabE5rAkIbnrqndIKyXrr7wFy54e/8ujkgjHg=
+go.opentelemetry.io/build-tools/crosslink v0.12.0/go.mod h1:QE8Kxf4Ygg2ltSHE+Vdys/67jtQM26j7spJLyjNA2DU=
+go.opentelemetry.io/build-tools/dbotconf v0.12.0 h1:I+oaEtAMK+nd660l//r14d3AI1A8BB3A4hKArvUX/n4=
+go.opentelemetry.io/build-tools/dbotconf v0.12.0/go.mod h1:K0Xszcb11bbFtVpjieY8gzGWLw9SNarDKvFW1Ti7w4U=
+go.opentelemetry.io/build-tools/gotmpl v0.12.0 h1:ysCtNFkoJddyaAdemtdbI6Qn7nb7GYn2WbHmajTW+pM=
+go.opentelemetry.io/build-tools/gotmpl v0.12.0/go.mod h1:FzweYUfAJC1i5ATrtFI4KJggnO9QQGPdSVKWA8RHjdE=
+go.opentelemetry.io/build-tools/multimod v0.12.0 h1:DKi+A+4EaKrOZDTNDDZz3ijiAduEQDo8j1rzWUaGUHo=
+go.opentelemetry.io/build-tools/multimod v0.12.0/go.mod h1:w03q3WgZs7reoBNnmfdClkKdTIA/IHM8ric5E2jEDD0=
+go.opentelemetry.io/build-tools/semconvgen v0.12.0 h1:AsjYFwo8sSLAjwjklj+yVwm2xogJUxRf5pxflATg9N0=
+go.opentelemetry.io/build-tools/semconvgen v0.12.0/go.mod h1:SRmou8pp+7gBmf1AvdxOTwVts74Syyrgm1/Qx7R8mis=
+go.tmz.dev/musttag v0.7.2 h1:1J6S9ipDbalBSODNT5jCep8dhZyMr4ttnjQagmGYR5s=
+go.tmz.dev/musttag v0.7.2/go.mod h1:m6q5NiiSKMnQYokefa2xGoyoXnrswCbJ0AWYzf4Zs28=
+go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
 go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
-go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
+go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo=
+go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -655,8 +664,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0
 golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
 golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
 golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
-golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
-golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
+golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
+golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -703,8 +712,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
 golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
 golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
-golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -748,8 +757,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
 golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
-golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
+golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
+golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -773,8 +782,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
-golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
+golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -836,8 +845,8 @@ golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -845,7 +854,7 @@ golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
 golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
-golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
+golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -858,8 +867,8 @@ golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
-golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -933,8 +942,10 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
 golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
 golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
-golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
+golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM=
+golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
+golang.org/x/vuln v1.0.1 h1:KUas02EjQK5LTuIx1OylBQdKKZ9jeugs+HiqO5HormU=
+golang.org/x/vuln v1.0.1/go.mod h1:bb2hMwln/tqxg32BNY4CcxHWtHXuYa3SbIBmtsyjxtM=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1036,7 +1047,6 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
 gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
@@ -1059,16 +1069,16 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-honnef.co/go/tools v0.4.3 h1:o/n5/K5gXqk8Gozvs2cnL0F2S1/g1vcGCAx2vETjITw=
-honnef.co/go/tools v0.4.3/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA=
+honnef.co/go/tools v0.4.6 h1:oFEHCKeID7to/3autwsWfnuv69j3NsfcXbvJKuIcep8=
+honnef.co/go/tools v0.4.6/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0=
 mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E=
 mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js=
 mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I=
 mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
 mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo=
 mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
-mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d h1:3rvTIIM22r9pvXk+q3swxUQAQOxksVMGK7sml4nG57w=
-mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d/go.mod h1:IeHQjmn6TOD+e4Z3RFiZMMsLVL+A96Nvptar8Fj71is=
+mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8 h1:VuJo4Mt0EVPychre4fNlDWDuE5AjXtPJpRUWqZDQhaI=
+mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8/go.mod h1:Oh/d7dEtzsNHGOq1Cdv8aMm3KdKhVvPbRQcM8WFpBR8=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/internal/tools/semconvkit/templates/doc.go.tmpl b/internal/tools/semconvkit/templates/doc.go.tmpl
index c8fd7b218ab..4a45fb295da 100644
--- a/internal/tools/semconvkit/templates/doc.go.tmpl
+++ b/internal/tools/semconvkit/templates/doc.go.tmpl
@@ -15,6 +15,6 @@
 // Package semconv implements OpenTelemetry semantic conventions.
 //
 // OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the conventions
-// as of the {{.TagVer}} version of the OpenTelemetry specification.
+// patterns for OpenTelemetry things. This package represents the {{.TagVer}}
+// version of the OpenTelemetry semantic conventions.
 package semconv // import "go.opentelemetry.io/otel/semconv/{{.TagVer}}"
diff --git a/internal/tools/tools.go b/internal/tools/tools.go
index e4b8e4c0fd2..44d113314ed 100644
--- a/internal/tools/tools.go
+++ b/internal/tools/tools.go
@@ -31,4 +31,5 @@ import (
 	_ "go.opentelemetry.io/build-tools/semconvgen"
 	_ "golang.org/x/exp/cmd/gorelease"
 	_ "golang.org/x/tools/cmd/stringer"
+	_ "golang.org/x/vuln/cmd/govulncheck"
 )
diff --git a/metric/doc.go b/metric/doc.go
index ae24e448d91..54716e13b35 100644
--- a/metric/doc.go
+++ b/metric/doc.go
@@ -149,7 +149,7 @@ of [go.opentelemetry.io/otel/metric].
 
 Finally, an author can embed another implementation in theirs. The embedded
 implementation will be used for methods not defined by the author. For example,
-an author who want to default to silently dropping the call can use
+an author who wants to default to silently dropping the call can use
 [go.opentelemetry.io/otel/metric/noop]:
 
 	import "go.opentelemetry.io/otel/metric/noop"
diff --git a/metric/example_test.go b/metric/example_test.go
index c1f67a1af18..758dd6b57f1 100644
--- a/metric/example_test.go
+++ b/metric/example_test.go
@@ -16,18 +16,23 @@ package metric_test
 
 import (
 	"context"
+	"database/sql"
 	"fmt"
+	"net/http"
 	"runtime"
 	"time"
 
 	"go.opentelemetry.io/otel"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/metric"
+	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
 )
 
+var meter = otel.Meter("my-service-meter")
+
 func ExampleMeter_synchronous() {
 	// Create a histogram using the global MeterProvider.
-	workDuration, err := otel.Meter("go.opentelemetry.io/otel/metric#SyncExample").Int64Histogram(
+	workDuration, err := meter.Int64Histogram(
 		"workDuration",
 		metric.WithUnit("ms"))
 	if err != nil {
@@ -43,8 +48,6 @@ func ExampleMeter_synchronous() {
 }
 
 func ExampleMeter_asynchronous_single() {
-	meter := otel.Meter("go.opentelemetry.io/otel/metric#AsyncExample")
-
 	_, err := meter.Int64ObservableGauge(
 		"DiskUsage",
 		metric.WithUnit("By"),
@@ -73,13 +76,19 @@ func ExampleMeter_asynchronous_single() {
 }
 
 func ExampleMeter_asynchronous_multiple() {
-	meter := otel.Meter("go.opentelemetry.io/otel/metric#MultiAsyncExample")
-
 	// This is just a sample of memory stats to record from the Memstats
-	heapAlloc, _ := meter.Int64ObservableUpDownCounter("heapAllocs")
-	gcCount, _ := meter.Int64ObservableCounter("gcCount")
+	heapAlloc, err := meter.Int64ObservableUpDownCounter("heapAllocs")
+	if err != nil {
+		fmt.Println("failed to register updown counter for heapAllocs")
+		panic(err)
+	}
+	gcCount, err := meter.Int64ObservableCounter("gcCount")
+	if err != nil {
+		fmt.Println("failed to register counter for gcCount")
+		panic(err)
+	}
 
-	_, err := meter.RegisterCallback(
+	_, err = meter.RegisterCallback(
 		func(_ context.Context, o metric.Observer) error {
 			memStats := &runtime.MemStats{}
 			// This call does work
@@ -98,3 +107,176 @@ func ExampleMeter_asynchronous_multiple() {
 		panic(err)
 	}
 }
+
+// Counters can be used to measure a non-negative, increasing value.
+//
+// Here's how you might report the number of calls for an HTTP handler.
+func ExampleMeter_counter() {
+	apiCounter, err := meter.Int64Counter(
+		"api.counter",
+		metric.WithDescription("Number of API calls."),
+		metric.WithUnit("{call}"),
+	)
+	if err != nil {
+		panic(err)
+	}
+	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		apiCounter.Add(r.Context(), 1)
+
+		// do some work in an API call
+	})
+}
+
+// UpDown counters can increment and decrement, allowing you to observe
+// a cumulative value that goes up or down.
+//
+// Here's how you might report the number of items of some collection.
+func ExampleMeter_upDownCounter() {
+	var err error
+	itemsCounter, err := meter.Int64UpDownCounter(
+		"items.counter",
+		metric.WithDescription("Number of items."),
+		metric.WithUnit("{item}"),
+	)
+	if err != nil {
+		panic(err)
+	}
+
+	_ = func() {
+		// code that adds an item to the collection
+		itemsCounter.Add(context.Background(), 1)
+	}
+
+	_ = func() {
+		// code that removes an item from the collection
+		itemsCounter.Add(context.Background(), -1)
+	}
+}
+
+// Histograms are used to measure a distribution of values over time.
+//
+// Here's how you might report a distribution of response times for an HTTP handler.
+func ExampleMeter_histogram() {
+	histogram, err := meter.Float64Histogram(
+		"task.duration",
+		metric.WithDescription("The duration of task execution."),
+		metric.WithUnit("s"),
+		metric.WithExplicitBucketBoundaries(.005, .01, .025, .05, .075, .1, .25, .5, .75, 1, 2.5, 5, 7.5, 10),
+	)
+	if err != nil {
+		panic(err)
+	}
+	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		start := time.Now()
+
+		// do some work in an API call
+
+		duration := time.Since(start)
+		histogram.Record(r.Context(), duration.Seconds())
+	})
+}
+
+// Observable counters can be used to measure an additive, non-negative,
+// monotonically increasing value.
+//
+// Here's how you might report time since the application started.
+func ExampleMeter_observableCounter() {
+	start := time.Now()
+	if _, err := meter.Float64ObservableCounter(
+		"uptime",
+		metric.WithDescription("The duration since the application started."),
+		metric.WithUnit("s"),
+		metric.WithFloat64Callback(func(_ context.Context, o metric.Float64Observer) error {
+			o.Observe(float64(time.Since(start).Seconds()))
+			return nil
+		}),
+	); err != nil {
+		panic(err)
+	}
+}
+
+// Observable UpDown counters can increment and decrement, allowing you to measure
+// an additive, non-negative, non-monotonically increasing cumulative value.
+//
+// Here's how you might report some database metrics.
+func ExampleMeter_observableUpDownCounter() {
+	// The function registers asynchronous metrics for the provided db.
+	// Make sure to unregister metric.Registration before closing the provided db.
+	_ = func(db *sql.DB, meter metric.Meter, poolName string) (metric.Registration, error) {
+		max, err := meter.Int64ObservableUpDownCounter(
+			"db.client.connections.max",
+			metric.WithDescription("The maximum number of open connections allowed."),
+			metric.WithUnit("{connection}"),
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		waitTime, err := meter.Int64ObservableUpDownCounter(
+			"db.client.connections.wait_time",
+			metric.WithDescription("The time it took to obtain an open connection from the pool."),
+			metric.WithUnit("ms"),
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		reg, err := meter.RegisterCallback(
+			func(_ context.Context, o metric.Observer) error {
+				stats := db.Stats()
+				o.ObserveInt64(max, int64(stats.MaxOpenConnections))
+				o.ObserveInt64(waitTime, int64(stats.WaitDuration))
+				return nil
+			},
+			max,
+			waitTime,
+		)
+		if err != nil {
+			return nil, err
+		}
+		return reg, nil
+	}
+}
+
+// Observable Gauges should be used to measure non-additive values.
+//
+// Here's how you might report memory usage of the heap objects used
+// in application.
+func ExampleMeter_observableGauge() {
+	if _, err := meter.Int64ObservableGauge(
+		"memory.heap",
+		metric.WithDescription(
+			"Memory usage of the allocated heap objects.",
+		),
+		metric.WithUnit("By"),
+		metric.WithInt64Callback(func(_ context.Context, o metric.Int64Observer) error {
+			var m runtime.MemStats
+			runtime.ReadMemStats(&m)
+			o.Observe(int64(m.HeapAlloc))
+			return nil
+		}),
+	); err != nil {
+		panic(err)
+	}
+}
+
+// You can add Attributes by using the [WithAttributeSet] and [WithAttributes] options.
+//
+// Here's how you might add the HTTP status code attribute to your recordings.
+func ExampleMeter_attributes() {
+	apiCounter, err := meter.Int64UpDownCounter(
+		"api.finished.counter",
+		metric.WithDescription("Number of finished API calls."),
+		metric.WithUnit("{call}"),
+	)
+	if err != nil {
+		panic(err)
+	}
+	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+		// do some work in an API call and set the response HTTP status code
+		statusCode := http.StatusOK
+
+		apiCounter.Add(r.Context(), 1,
+			metric.WithAttributes(semconv.HTTPStatusCode(statusCode)))
+	})
+}
diff --git a/metric/go.mod b/metric/go.mod
index cf27a3f4dfa..d0596d4bea6 100644
--- a/metric/go.mod
+++ b/metric/go.mod
@@ -1,18 +1,18 @@
 module go.opentelemetry.io/otel/metric
 
-go 1.19
+go 1.20
 
 require (
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
+	go.opentelemetry.io/otel v1.21.0
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/go-logr/logr v1.2.4 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	go.opentelemetry.io/otel/trace v1.16.0 // indirect
+	go.opentelemetry.io/otel/trace v1.21.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
diff --git a/metric/go.sum b/metric/go.sum
index cc053d286a4..130a4f410be 100644
--- a/metric/go.sum
+++ b/metric/go.sum
@@ -1,11 +1,11 @@
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
diff --git a/metric/instrument.go b/metric/instrument.go
index cdca00058c6..be89cd53341 100644
--- a/metric/instrument.go
+++ b/metric/instrument.go
@@ -39,6 +39,12 @@ type InstrumentOption interface {
 	Float64ObservableGaugeOption
 }
 
+// HistogramOption applies options to histogram instruments.
+type HistogramOption interface {
+	Int64HistogramOption
+	Float64HistogramOption
+}
+
 type descOpt string
 
 func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
@@ -171,6 +177,23 @@ func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64Ob
 // The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
 func WithUnit(u string) InstrumentOption { return unitOpt(u) }
 
+// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries.
+//
+// This option is considered "advisory", and may be ignored by API implementations.
+func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) }
+
+type bucketOpt []float64
+
+func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+	c.explicitBucketBoundaries = o
+	return c
+}
+
+func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+	c.explicitBucketBoundaries = o
+	return c
+}
+
 // AddOption applies options to an addition measurement. See
 // [MeasurementOption] for other options that can be used as an AddOption.
 type AddOption interface {
diff --git a/metric/syncfloat64.go b/metric/syncfloat64.go
index f0b063721d8..0a4825ae6a7 100644
--- a/metric/syncfloat64.go
+++ b/metric/syncfloat64.go
@@ -147,8 +147,9 @@ type Float64Histogram interface {
 // Float64HistogramConfig contains options for synchronous counter instruments
 // that record int64 values.
 type Float64HistogramConfig struct {
-	description string
-	unit        string
+	description              string
+	unit                     string
+	explicitBucketBoundaries []float64
 }
 
 // NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all
@@ -171,6 +172,11 @@ func (c Float64HistogramConfig) Unit() string {
 	return c.unit
 }
 
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+	return c.explicitBucketBoundaries
+}
+
 // Float64HistogramOption applies options to a [Float64HistogramConfig]. See
 // [InstrumentOption] for other options that can be used as a
 // Float64HistogramOption.
diff --git a/metric/syncfloat64_test.go b/metric/syncfloat64_test.go
index 7132680b131..4b3bbfdec29 100644
--- a/metric/syncfloat64_test.go
+++ b/metric/syncfloat64_test.go
@@ -51,3 +51,9 @@ type float64Config interface {
 	Description() string
 	Unit() string
 }
+
+func TestFloat64ExplicitBucketHistogramConfiguration(t *testing.T) {
+	bounds := []float64{0.1, 0.5, 1.0}
+	got := NewFloat64HistogramConfig(WithExplicitBucketBoundaries(bounds...))
+	assert.Equal(t, bounds, got.ExplicitBucketBoundaries(), "boundaries")
+}
diff --git a/metric/syncint64.go b/metric/syncint64.go
index 6f508eb66d4..56667d32fc0 100644
--- a/metric/syncint64.go
+++ b/metric/syncint64.go
@@ -147,8 +147,9 @@ type Int64Histogram interface {
 // Int64HistogramConfig contains options for synchronous counter instruments
 // that record int64 values.
 type Int64HistogramConfig struct {
-	description string
-	unit        string
+	description              string
+	unit                     string
+	explicitBucketBoundaries []float64
 }
 
 // NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
@@ -171,6 +172,11 @@ func (c Int64HistogramConfig) Unit() string {
 	return c.unit
 }
 
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+	return c.explicitBucketBoundaries
+}
+
 // Int64HistogramOption applies options to a [Int64HistogramConfig]. See
 // [InstrumentOption] for other options that can be used as an
 // Int64HistogramOption.
diff --git a/metric/syncint64_test.go b/metric/syncint64_test.go
index 51c02f5e041..ece9c7fbb50 100644
--- a/metric/syncint64_test.go
+++ b/metric/syncint64_test.go
@@ -51,3 +51,9 @@ type int64Config interface {
 	Description() string
 	Unit() string
 }
+
+func TestInt64ExplicitBucketHistogramConfiguration(t *testing.T) {
+	bounds := []float64{0.1, 0.5, 1.0}
+	got := NewInt64HistogramConfig(WithExplicitBucketBoundaries(bounds...))
+	assert.Equal(t, bounds, got.ExplicitBucketBoundaries(), "boundaries")
+}
diff --git a/propagation/propagation_test.go b/propagation/propagation_test.go
index 488d6551d5b..03fca2ff2d5 100644
--- a/propagation/propagation_test.go
+++ b/propagation/propagation_test.go
@@ -27,9 +27,7 @@ import (
 
 type ctxKeyType uint
 
-var (
-	ctxKey ctxKeyType
-)
+var ctxKey ctxKeyType
 
 type carrier []string
 
diff --git a/propagation/trace_context.go b/propagation/trace_context.go
index 902692da082..63e5d62221f 100644
--- a/propagation/trace_context.go
+++ b/propagation/trace_context.go
@@ -18,7 +18,7 @@ import (
 	"context"
 	"encoding/hex"
 	"fmt"
-	"regexp"
+	"strings"
 
 	"go.opentelemetry.io/otel/trace"
 )
@@ -28,6 +28,7 @@ const (
 	maxVersion        = 254
 	traceparentHeader = "traceparent"
 	tracestateHeader  = "tracestate"
+	delimiter         = "-"
 )
 
 // TraceContext is a propagator that supports the W3C Trace Context format
@@ -40,8 +41,10 @@ const (
 // their proprietary information.
 type TraceContext struct{}
 
-var _ TextMapPropagator = TraceContext{}
-var traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
+var (
+	_           TextMapPropagator = TraceContext{}
+	versionPart                   = fmt.Sprintf("%.2X", supportedVersion)
+)
 
 // Inject set tracecontext from the Context into the carrier.
 func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
@@ -57,12 +60,19 @@ func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
 	// Clear all flags other than the trace-context supported sampling bit.
 	flags := sc.TraceFlags() & trace.FlagsSampled
 
-	h := fmt.Sprintf("%.2x-%s-%s-%s",
-		supportedVersion,
-		sc.TraceID(),
-		sc.SpanID(),
-		flags)
-	carrier.Set(traceparentHeader, h)
+	var sb strings.Builder
+	sb.Grow(2 + 32 + 16 + 2 + 3)
+	_, _ = sb.WriteString(versionPart)
+	traceID := sc.TraceID()
+	spanID := sc.SpanID()
+	flagByte := [1]byte{byte(flags)}
+	var buf [32]byte
+	for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} {
+		_ = sb.WriteByte(delimiter[0])
+		n := hex.Encode(buf[:], src)
+		_, _ = sb.Write(buf[:n])
+	}
+	carrier.Set(traceparentHeader, sb.String())
 }
 
 // Extract reads tracecontext from the carrier into a returned Context.
@@ -84,21 +94,8 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
 		return trace.SpanContext{}
 	}
 
-	matches := traceCtxRegExp.FindStringSubmatch(h)
-
-	if len(matches) == 0 {
-		return trace.SpanContext{}
-	}
-
-	if len(matches) < 5 { // four subgroups plus the overall match
-		return trace.SpanContext{}
-	}
-
-	if len(matches[1]) != 2 {
-		return trace.SpanContext{}
-	}
-	ver, err := hex.DecodeString(matches[1])
-	if err != nil {
+	var ver [1]byte
+	if !extractPart(ver[:], &h, 2) {
 		return trace.SpanContext{}
 	}
 	version := int(ver[0])
@@ -106,36 +103,24 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
 		return trace.SpanContext{}
 	}
 
-	if version == 0 && len(matches) != 5 { // four subgroups plus the overall match
-		return trace.SpanContext{}
-	}
-
-	if len(matches[2]) != 32 {
-		return trace.SpanContext{}
-	}
-
 	var scc trace.SpanContextConfig
-
-	scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32])
-	if err != nil {
+	if !extractPart(scc.TraceID[:], &h, 32) {
 		return trace.SpanContext{}
 	}
-
-	if len(matches[3]) != 16 {
-		return trace.SpanContext{}
-	}
-	scc.SpanID, err = trace.SpanIDFromHex(matches[3])
-	if err != nil {
+	if !extractPart(scc.SpanID[:], &h, 16) {
 		return trace.SpanContext{}
 	}
 
-	if len(matches[4]) != 2 {
+	var opts [1]byte
+	if !extractPart(opts[:], &h, 2) {
 		return trace.SpanContext{}
 	}
-	opts, err := hex.DecodeString(matches[4])
-	if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) {
+	if version == 0 && (h != "" || opts[0] > 2) {
+		// version 0 not allow extra
+		// version 0 not allow other flag
 		return trace.SpanContext{}
 	}
+
 	// Clear all flags other than the trace-context supported sampling bit.
 	scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
 
@@ -153,6 +138,29 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
 	return sc
 }
 
+// upperHex detect hex is upper case Unicode characters.
+func upperHex(v string) bool {
+	for _, c := range v {
+		if c >= 'A' && c <= 'F' {
+			return true
+		}
+	}
+	return false
+}
+
+func extractPart(dst []byte, h *string, n int) bool {
+	part, left, _ := strings.Cut(*h, delimiter)
+	*h = left
+	// hex.Decode decodes unsupported upper-case characters, so exclude explicitly.
+	if len(part) != n || upperHex(part) {
+		return false
+	}
+	if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 {
+		return false
+	}
+	return true
+}
+
 // Fields returns the keys who's values are set with Inject.
 func (tc TraceContext) Fields() []string {
 	return []string{traceparentHeader, tracestateHeader}
diff --git a/requirements.txt b/requirements.txt
index 407f17489c6..e0a43e13840 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1 +1 @@
-codespell==2.2.4
+codespell==2.2.6
diff --git a/schema/go.mod b/schema/go.mod
index 674b0067f47..bfc2ab62789 100644
--- a/schema/go.mod
+++ b/schema/go.mod
@@ -1,15 +1,14 @@
 module go.opentelemetry.io/otel/schema
 
-go 1.19
+go 1.20
 
 require (
 	github.com/Masterminds/semver/v3 v3.2.1
 	github.com/stretchr/testify v1.8.4
-	gopkg.in/yaml.v2 v2.4.0
+	gopkg.in/yaml.v3 v3.0.1
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
diff --git a/schema/go.sum b/schema/go.sum
index f9c91c9cccd..839fe61dc7b 100644
--- a/schema/go.sum
+++ b/schema/go.sum
@@ -8,7 +8,5 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/schema/v1.0/parser.go b/schema/v1.0/parser.go
index a284606bd9c..75a09bb0687 100644
--- a/schema/v1.0/parser.go
+++ b/schema/v1.0/parser.go
@@ -18,7 +18,7 @@ import (
 	"io"
 	"os"
 
-	"gopkg.in/yaml.v2"
+	"gopkg.in/yaml.v3"
 
 	"go.opentelemetry.io/otel/schema/internal"
 	"go.opentelemetry.io/otel/schema/v1.0/ast"
@@ -43,6 +43,7 @@ func ParseFile(schemaFilePath string) (*ast.Schema, error) {
 func Parse(schemaFileContent io.Reader) (*ast.Schema, error) {
 	var ts ast.Schema
 	d := yaml.NewDecoder(schemaFileContent)
+	d.KnownFields(true)
 	err := d.Decode(&ts)
 	if err != nil {
 		return nil, err
diff --git a/schema/v1.0/parser_test.go b/schema/v1.0/parser_test.go
index ab47452db72..36aba51ea5e 100644
--- a/schema/v1.0/parser_test.go
+++ b/schema/v1.0/parser_test.go
@@ -168,6 +168,10 @@ func TestFailParseSchemaFile(t *testing.T) {
 	ts, err = ParseFile("testdata/invalid-schema-url.yaml")
 	assert.Error(t, err)
 	assert.Nil(t, ts)
+
+	ts, err = ParseFile("testdata/unknown-field.yaml")
+	assert.ErrorContains(t, err, "field Resources not found in type ast.VersionDef")
+	assert.Nil(t, ts)
 }
 
 func TestFailParseSchema(t *testing.T) {
diff --git a/schema/v1.0/testdata/unknown-field.yaml b/schema/v1.0/testdata/unknown-field.yaml
new file mode 100644
index 00000000000..0d344e44cd9
--- /dev/null
+++ b/schema/v1.0/testdata/unknown-field.yaml
@@ -0,0 +1,15 @@
+file_format: 1.0.0
+schema_url: https://opentelemetry.io/schemas/1.0.0
+
+versions:
+  1.1.0:
+    all: # Valid entry.
+      changes:
+        - rename_attributes:
+            k8s.cluster.name: kubernetes.cluster.name
+    Resources: # Invalid uppercase.
+      changes:
+        - rename_attributes:
+          attribute_map:
+            browser.user_agent: user_agent.original
+  1.0.0:
diff --git a/schema/v1.1/parser.go b/schema/v1.1/parser.go
index 1e1ca8db56c..43b70524f38 100644
--- a/schema/v1.1/parser.go
+++ b/schema/v1.1/parser.go
@@ -18,7 +18,7 @@ import (
 	"io"
 	"os"
 
-	"gopkg.in/yaml.v2"
+	"gopkg.in/yaml.v3"
 
 	"go.opentelemetry.io/otel/schema/internal"
 	"go.opentelemetry.io/otel/schema/v1.1/ast"
@@ -43,7 +43,7 @@ func ParseFile(schemaFilePath string) (*ast.Schema, error) {
 func Parse(schemaFileContent io.Reader) (*ast.Schema, error) {
 	var ts ast.Schema
 	d := yaml.NewDecoder(schemaFileContent)
-	d.SetStrict(true) // Do not silently drop unknown fields.
+	d.KnownFields(true)
 	err := d.Decode(&ts)
 	if err != nil {
 		return nil, err
diff --git a/sdk/go.mod b/sdk/go.mod
index 82fd372ede4..7583fc33c6e 100644
--- a/sdk/go.mod
+++ b/sdk/go.mod
@@ -1,23 +1,23 @@
 module go.opentelemetry.io/otel/sdk
 
-go 1.19
+go 1.20
 
 replace go.opentelemetry.io/otel => ../
 
 require (
-	github.com/go-logr/logr v1.2.4
-	github.com/google/go-cmp v0.5.9
+	github.com/go-logr/logr v1.3.0
+	github.com/google/go-cmp v0.6.0
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/trace v1.16.0
-	golang.org/x/sys v0.11.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/trace v1.21.0
+	golang.org/x/sys v0.15.0
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	go.opentelemetry.io/otel/metric v1.16.0 // indirect
+	go.opentelemetry.io/otel/metric v1.21.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
diff --git a/sdk/go.sum b/sdk/go.sum
index 7729e42a72e..939feb405bb 100644
--- a/sdk/go.sum
+++ b/sdk/go.sum
@@ -1,18 +1,18 @@
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/sdk/internal/internaltest/harness.go b/sdk/internal/internaltest/harness.go
index f177b0567a9..b2d461ea0d9 100644
--- a/sdk/internal/internaltest/harness.go
+++ b/sdk/internal/internaltest/harness.go
@@ -263,7 +263,7 @@ func (h *Harness) TestTracer(subjectFactory func() trace.Tracer) {
 }
 
 func (h *Harness) testSpan(tracerFactory func() trace.Tracer) {
-	var methods = map[string]func(span trace.Span){
+	methods := map[string]func(span trace.Span){
 		"#End": func(span trace.Span) {
 			span.End()
 		},
@@ -283,7 +283,7 @@ func (h *Harness) testSpan(tracerFactory func() trace.Tracer) {
 			span.SetAttributes(attribute.String("key1", "value"), attribute.Int("key2", 123))
 		},
 	}
-	var mechanisms = map[string]func() trace.Span{
+	mechanisms := map[string]func() trace.Span{
 		"Span created via Tracer#Start": func() trace.Span {
 			tracer := tracerFactory()
 			_, subject := tracer.Start(context.Background(), "test")
diff --git a/sdk/internal/internaltest/text_map_carrier_test.go b/sdk/internal/internaltest/text_map_carrier_test.go
index faf713cc2d0..086c8af26ea 100644
--- a/sdk/internal/internaltest/text_map_carrier_test.go
+++ b/sdk/internal/internaltest/text_map_carrier_test.go
@@ -22,9 +22,7 @@ import (
 	"testing"
 )
 
-var (
-	key, value = "test", "true"
-)
+var key, value = "test", "true"
 
 func TestTextMapCarrierKeys(t *testing.T) {
 	tmc := NewTextMapCarrier(map[string]string{key: value})
diff --git a/sdk/internal/matchers/expectation.go b/sdk/internal/matchers/expectation.go
index 84764308651..c48aff036e7 100644
--- a/sdk/internal/matchers/expectation.go
+++ b/sdk/internal/matchers/expectation.go
@@ -27,9 +27,7 @@ import (
 	"time"
 )
 
-var (
-	stackTracePruneRE = regexp.MustCompile(`runtime\/debug|testing|internal\/matchers`)
-)
+var stackTracePruneRE = regexp.MustCompile(`runtime\/debug|testing|internal\/matchers`)
 
 type Expectation struct {
 	t      *testing.T
diff --git a/sdk/metric/aggregation.go b/sdk/metric/aggregation.go
index 08ff6cc3dd8..faddbb0b61b 100644
--- a/sdk/metric/aggregation.go
+++ b/sdk/metric/aggregation.go
@@ -48,8 +48,8 @@ func (AggregationDrop) err() error { return nil }
 // make an aggregation selection based on instrument kind that differs from
 // the default. This Aggregation ensures the default is used.
 //
-// See the "go.opentelemetry.io/otel/sdk/metric".DefaultAggregationSelector
-// for information about the default instrument kind selection mapping.
+// See the [DefaultAggregationSelector] for information about the default
+// instrument kind selection mapping.
 type AggregationDefault struct{} // AggregationDefault has no parameters.
 
 var _ Aggregation = AggregationDefault{}
@@ -161,7 +161,7 @@ type AggregationBase2ExponentialHistogram struct {
 	// signed 32-bit integer index could be used.
 	//
 	// MaxScale has a minimum value of -10. Using a value of -10 means only
-	// two buckets will be use.
+	// two buckets will be used.
 	MaxScale int32
 
 	// NoMinMax indicates whether to not record the min and max of the
diff --git a/sdk/metric/aggregation/aggregation.go b/sdk/metric/aggregation/aggregation.go
deleted file mode 100644
index 5d5643eb294..00000000000
--- a/sdk/metric/aggregation/aggregation.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package aggregation contains configuration types that define the
-// aggregation operation used to summarizes recorded measurements.
-//
-// Deprecated: Use the aggregation types in go.opentelemetry.io/otel/sdk/metric
-// instead.
-package aggregation // import "go.opentelemetry.io/otel/sdk/metric/aggregation"
-
-import (
-	"errors"
-	"fmt"
-)
-
-// errAgg is wrapped by misconfigured aggregations.
-var errAgg = errors.New("aggregation")
-
-// Aggregation is the aggregation used to summarize recorded measurements.
-type Aggregation interface {
-	// private attempts to ensure no user-defined Aggregation are allowed. The
-	// OTel specification does not allow user-defined Aggregation currently.
-	private()
-
-	// Copy returns a deep copy of the Aggregation.
-	Copy() Aggregation
-
-	// Err returns an error for any misconfigured Aggregation.
-	Err() error
-}
-
-// Drop is an aggregation that drops all recorded data.
-type Drop struct{} // Drop has no parameters.
-
-var _ Aggregation = Drop{}
-
-func (Drop) private() {}
-
-// Copy returns a deep copy of d.
-func (d Drop) Copy() Aggregation { return d }
-
-// Err returns an error for any misconfiguration. A Drop aggregation has no
-// parameters and cannot be misconfigured, therefore this always returns nil.
-func (Drop) Err() error { return nil }
-
-// Default is an aggregation that uses the default instrument kind selection
-// mapping to select another aggregation. A metric reader can be configured to
-// make an aggregation selection based on instrument kind that differs from
-// the default. This aggregation ensures the default is used.
-//
-// See the "go.opentelemetry.io/otel/sdk/metric".DefaultAggregationSelector
-// for information about the default instrument kind selection mapping.
-type Default struct{} // Default has no parameters.
-
-var _ Aggregation = Default{}
-
-func (Default) private() {}
-
-// Copy returns a deep copy of d.
-func (d Default) Copy() Aggregation { return d }
-
-// Err returns an error for any misconfiguration. A Default aggregation has no
-// parameters and cannot be misconfigured, therefore this always returns nil.
-func (Default) Err() error { return nil }
-
-// Sum is an aggregation that summarizes a set of measurements as their
-// arithmetic sum.
-type Sum struct{} // Sum has no parameters.
-
-var _ Aggregation = Sum{}
-
-func (Sum) private() {}
-
-// Copy returns a deep copy of s.
-func (s Sum) Copy() Aggregation { return s }
-
-// Err returns an error for any misconfiguration. A Sum aggregation has no
-// parameters and cannot be misconfigured, therefore this always returns nil.
-func (Sum) Err() error { return nil }
-
-// LastValue is an aggregation that summarizes a set of measurements as the
-// last one made.
-type LastValue struct{} // LastValue has no parameters.
-
-var _ Aggregation = LastValue{}
-
-func (LastValue) private() {}
-
-// Copy returns a deep copy of l.
-func (l LastValue) Copy() Aggregation { return l }
-
-// Err returns an error for any misconfiguration. A LastValue aggregation has
-// no parameters and cannot be misconfigured, therefore this always returns
-// nil.
-func (LastValue) Err() error { return nil }
-
-// ExplicitBucketHistogram is an aggregation that summarizes a set of
-// measurements as an histogram with explicitly defined buckets.
-type ExplicitBucketHistogram struct {
-	// Boundaries are the increasing bucket boundary values. Boundary values
-	// define bucket upper bounds. Buckets are exclusive of their lower
-	// boundary and inclusive of their upper bound (except at positive
-	// infinity). A measurement is defined to fall into the greatest-numbered
-	// bucket with a boundary that is greater than or equal to the
-	// measurement. As an example, boundaries defined as:
-	//
-	// []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}
-	//
-	// Will define these buckets:
-	//
-	// (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, 25.0], (25.0, 50.0],
-	// (50.0, 75.0], (75.0, 100.0], (100.0, 250.0], (250.0, 500.0],
-	// (500.0, 1000.0], (1000.0, +∞)
-	Boundaries []float64
-	// NoMinMax indicates whether to not record the min and max of the
-	// distribution. By default, these extrema are recorded.
-	//
-	// Recording these extrema for cumulative data is expected to have little
-	// value, they will represent the entire life of the instrument instead of
-	// just the current collection cycle. It is recommended to set this to true
-	// for that type of data to avoid computing the low-value extrema.
-	NoMinMax bool
-}
-
-var _ Aggregation = ExplicitBucketHistogram{}
-
-func (ExplicitBucketHistogram) private() {}
-
-// errHist is returned by misconfigured ExplicitBucketHistograms.
-var errHist = fmt.Errorf("%w: explicit bucket histogram", errAgg)
-
-// Err returns an error for any misconfiguration.
-func (h ExplicitBucketHistogram) Err() error {
-	if len(h.Boundaries) <= 1 {
-		return nil
-	}
-
-	// Check boundaries are monotonic.
-	i := h.Boundaries[0]
-	for _, j := range h.Boundaries[1:] {
-		if i >= j {
-			return fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, h.Boundaries)
-		}
-		i = j
-	}
-
-	return nil
-}
-
-// Copy returns a deep copy of h.
-func (h ExplicitBucketHistogram) Copy() Aggregation {
-	b := make([]float64, len(h.Boundaries))
-	copy(b, h.Boundaries)
-	return ExplicitBucketHistogram{
-		Boundaries: b,
-		NoMinMax:   h.NoMinMax,
-	}
-}
-
-// Base2ExponentialHistogram is an aggregation that summarizes a set of
-// measurements as an histogram with bucket widths that grow exponentially.
-type Base2ExponentialHistogram struct {
-	// MaxSize is the maximum number of buckets to use for the histogram.
-	MaxSize int32
-	// MaxScale is the maximum resolution scale to use for the histogram.
-	//
-	// MaxScale has a maximum value of 20. Using a value of 20 means the
-	// maximum number of buckets that can fit within the range of a
-	// signed 32-bit integer index could be used.
-	//
-	// MaxScale has a minimum value of -10. Using a value of -10 means only
-	// two buckets will be use.
-	MaxScale int32
-
-	// NoMinMax indicates whether to not record the min and max of the
-	// distribution. By default, these extrema are recorded.
-	//
-	// Recording these extrema for cumulative data is expected to have little
-	// value, they will represent the entire life of the instrument instead of
-	// just the current collection cycle. It is recommended to set this to true
-	// for that type of data to avoid computing the low-value extrema.
-	NoMinMax bool
-}
-
-var _ Aggregation = Base2ExponentialHistogram{}
-
-// private attempts to ensure no user-defined Aggregation is allowed. The
-// OTel specification does not allow user-defined Aggregation currently.
-func (e Base2ExponentialHistogram) private() {}
-
-// Copy returns a deep copy of the Aggregation.
-func (e Base2ExponentialHistogram) Copy() Aggregation {
-	return e
-}
-
-const (
-	expoMaxScale = 20
-	expoMinScale = -10
-)
-
-// errExpoHist is returned by misconfigured Base2ExponentialBucketHistograms.
-var errExpoHist = fmt.Errorf("%w: exponential histogram", errAgg)
-
-// Err returns an error for any misconfigured Aggregation.
-func (e Base2ExponentialHistogram) Err() error {
-	if e.MaxScale > expoMaxScale {
-		return fmt.Errorf("%w: max size %d is greater than maximum scale %d", errExpoHist, e.MaxSize, expoMaxScale)
-	}
-	if e.MaxSize <= 0 {
-		return fmt.Errorf("%w: max size %d is less than or equal to zero", errExpoHist, e.MaxSize)
-	}
-	return nil
-}
diff --git a/sdk/metric/aggregation/aggregation_test.go b/sdk/metric/aggregation/aggregation_test.go
deleted file mode 100644
index 15bc37f2500..00000000000
--- a/sdk/metric/aggregation/aggregation_test.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package aggregation
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func TestAggregationErr(t *testing.T) {
-	t.Run("DropOperation", func(t *testing.T) {
-		assert.NoError(t, Drop{}.Err())
-	})
-
-	t.Run("SumOperation", func(t *testing.T) {
-		assert.NoError(t, Sum{}.Err())
-	})
-
-	t.Run("LastValueOperation", func(t *testing.T) {
-		assert.NoError(t, LastValue{}.Err())
-	})
-
-	t.Run("ExplicitBucketHistogramOperation", func(t *testing.T) {
-		assert.NoError(t, ExplicitBucketHistogram{}.Err())
-
-		assert.NoError(t, ExplicitBucketHistogram{
-			Boundaries: []float64{0},
-			NoMinMax:   true,
-		}.Err())
-
-		assert.NoError(t, ExplicitBucketHistogram{
-			Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000},
-		}.Err())
-	})
-
-	t.Run("NonmonotonicHistogramBoundaries", func(t *testing.T) {
-		assert.ErrorIs(t, ExplicitBucketHistogram{
-			Boundaries: []float64{2, 1},
-		}.Err(), errAgg)
-
-		assert.ErrorIs(t, ExplicitBucketHistogram{
-			Boundaries: []float64{0, 1, 2, 1, 3, 4},
-		}.Err(), errAgg)
-	})
-
-	t.Run("ExponentialHistogramOperation", func(t *testing.T) {
-		assert.NoError(t, Base2ExponentialHistogram{
-			MaxSize:  160,
-			MaxScale: 20,
-		}.Err())
-
-		assert.NoError(t, Base2ExponentialHistogram{
-			MaxSize:  1,
-			NoMinMax: true,
-		}.Err())
-
-		assert.NoError(t, Base2ExponentialHistogram{
-			MaxSize:  1024,
-			MaxScale: -3,
-		}.Err())
-	})
-
-	t.Run("InvalidExponentialHistogramOperation", func(t *testing.T) {
-		// MazSize must be greater than 0
-		assert.ErrorIs(t, Base2ExponentialHistogram{}.Err(), errAgg)
-
-		// MaxScale Must be <=20
-		assert.ErrorIs(t, Base2ExponentialHistogram{
-			MaxSize:  1,
-			MaxScale: 30,
-		}.Err(), errAgg)
-	})
-}
-
-func TestExplicitBucketHistogramDeepCopy(t *testing.T) {
-	const orig = 0.0
-	b := []float64{orig}
-	h := ExplicitBucketHistogram{Boundaries: b}
-	cpH := h.Copy().(ExplicitBucketHistogram)
-	b[0] = orig + 1
-	assert.Equal(t, orig, cpH.Boundaries[0], "changing the underlying slice data should not affect the copy")
-}
diff --git a/sdk/metric/benchmark_test.go b/sdk/metric/benchmark_test.go
index dd75de3cd63..90f88088630 100644
--- a/sdk/metric/benchmark_test.go
+++ b/sdk/metric/benchmark_test.go
@@ -42,7 +42,7 @@ var viewBenchmarks = []struct {
 		"AttrFilterView",
 		[]View{NewView(
 			Instrument{Name: "*"},
-			Stream{AllowAttributeKeys: []attribute.Key{"K"}},
+			Stream{AttributeFilter: attribute.NewAllowKeysFilter("K")},
 		)},
 	},
 }
diff --git a/sdk/metric/config_test.go b/sdk/metric/config_test.go
index ae7159f2d2e..42bf16a6a96 100644
--- a/sdk/metric/config_test.go
+++ b/sdk/metric/config_test.go
@@ -47,6 +47,7 @@ func (r *reader) RegisterProducer(p Producer) { r.externalProducers = append(r.e
 func (r *reader) temporality(kind InstrumentKind) metricdata.Temporality {
 	return r.temporalityFunc(kind)
 }
+
 func (r *reader) Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error {
 	return r.collectFunc(ctx, rm)
 }
diff --git a/sdk/metric/doc.go b/sdk/metric/doc.go
index 92878ce8bc2..53f80c42893 100644
--- a/sdk/metric/doc.go
+++ b/sdk/metric/doc.go
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package metric provides an implementation of the OpenTelemetry metric SDK.
+// Package metric provides an implementation of the OpenTelemetry metrics SDK.
 //
 // See https://opentelemetry.io/docs/concepts/signals/metrics/ for information
 // about the concept of OpenTelemetry metrics and
@@ -27,8 +27,8 @@
 // A MeterProvider needs to be configured to export the measured data, this is
 // done by configuring it with a Reader implementation (using the WithReader
 // MeterProviderOption). Readers take two forms: ones that push to an endpoint
-// (NewPeriodicReader), and ones that an endpoint pulls from. See the
-// go.opentelemetry.io/otel/exporters package for exporters that can be used as
+// (NewPeriodicReader), and ones that an endpoint pulls from. See
+// [go.opentelemetry.io/otel/exporters] for exporters that can be used as
 // or with these Readers.
 //
 // Each Reader, when registered with the MeterProvider, can be augmented with a
@@ -41,4 +41,7 @@
 // should be used to describe the unique runtime environment instrumented code
 // is being run on. That way when multiple instances of the code are collected
 // at a single endpoint their origin is decipherable.
+//
+// See [go.opentelemetry.io/otel/metric] for more information about
+// the metric API.
 package metric // import "go.opentelemetry.io/otel/sdk/metric"
diff --git a/sdk/metric/example_test.go b/sdk/metric/example_test.go
index cf8728deb0f..81a59343bea 100644
--- a/sdk/metric/example_test.go
+++ b/sdk/metric/example_test.go
@@ -16,45 +16,238 @@ package metric_test
 
 import (
 	"context"
+	"fmt"
 	"log"
+	"regexp"
 
 	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
 	"go.opentelemetry.io/otel/sdk/metric"
 	"go.opentelemetry.io/otel/sdk/resource"
 	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
 )
 
+// To enable metrics in your application using the SDK,
+// you'll need to have an initialized [MeterProvider]
+// that will let you create a [go.opentelemetry.io/otel/metric.Meter].
+//
+// Here's how you might initialize a metrics provider.
 func Example() {
+	// Create resource.
+	res, err := resource.Merge(resource.Default(),
+		resource.NewWithAttributes(semconv.SchemaURL,
+			semconv.ServiceName("my-service"),
+			semconv.ServiceVersion("0.1.0"),
+		))
+	if err != nil {
+		log.Fatalln(err)
+	}
+
 	// This reader is used as a stand-in for a reader that will actually export
-	// data. See exporters in the go.opentelemetry.io/otel/exporters package
-	// for more information.
+	// data. See https://pkg.go.dev/go.opentelemetry.io/otel/exporters for
+	// exporters that can be used as or with readers.
 	reader := metric.NewManualReader()
 
-	// See the go.opentelemetry.io/otel/sdk/resource package for more
-	// information about how to create and use Resources.
-	res := resource.NewWithAttributes(
-		semconv.SchemaURL,
-		semconv.ServiceName("my-service"),
-		semconv.ServiceVersion("v0.1.0"),
-	)
-
+	// Create a meter provider.
+	// You can pass this instance directly to your instrumented code if it
+	// accepts a MeterProvider instance.
 	meterProvider := metric.NewMeterProvider(
 		metric.WithResource(res),
 		metric.WithReader(reader),
 	)
-	otel.SetMeterProvider(meterProvider)
+
+	// Handle shutdown properly so that nothing leaks.
 	defer func() {
 		err := meterProvider.Shutdown(context.Background())
 		if err != nil {
 			log.Fatalln(err)
 		}
 	}()
-	// The MeterProvider is configured and registered globally. You can now run
-	// your code instrumented with the OpenTelemetry API that uses the global
-	// MeterProvider without having to pass this MeterProvider instance. Or,
-	// you can pass this instance directly to your instrumented code if it
-	// accepts a MeterProvider instance.
+
+	// Register as global meter provider so that it can be used via otel.Meter
+	// and accessed using otel.GetMeterProvider.
+	// Most instrumentation libraries use the global meter provider as default.
+	// If the global meter provider is not set then a no-op implementation
+	// is used, which fails to generate data.
+	otel.SetMeterProvider(meterProvider)
+}
+
+func ExampleView() {
+	// The NewView function provides convenient creation of common Views
+	// construction. However, it is limited in what it can create.
 	//
-	// See the go.opentelemetry.io/otel/metric package for more information
-	// about the metric API.
+	// When NewView is not able to provide the functionally needed, a custom
+	// View can be constructed directly. Here a custom View is constructed that
+	// uses Go's regular expression matching to ensure all data stream names
+	// have a suffix of the units it uses.
+
+	re := regexp.MustCompile(`[._](ms|byte)$`)
+	var view metric.View = func(i metric.Instrument) (metric.Stream, bool) {
+		// In a custom View function, you need to explicitly copy
+		// the name, description, and unit.
+		s := metric.Stream{Name: i.Name, Description: i.Description, Unit: i.Unit}
+		// Any instrument that does not have a unit suffix defined, but has a
+		// dimensional unit defined, update the name with a unit suffix.
+		if re.MatchString(i.Name) {
+			return s, false
+		}
+		switch i.Unit {
+		case "ms":
+			s.Name += ".ms"
+		case "By":
+			s.Name += ".byte"
+		default:
+			return s, false
+		}
+		return s, true
+	}
+
+	// The created view can then be registered with the OpenTelemetry metric
+	// SDK using the WithView option.
+	_ = metric.NewMeterProvider(
+		metric.WithView(view),
+	)
+
+	// Below is an example of how the view will
+	// function in the SDK for certain instruments.
+	stream, _ := view(metric.Instrument{
+		Name: "computation.time.ms",
+		Unit: "ms",
+	})
+	fmt.Println("name:", stream.Name)
+
+	stream, _ = view(metric.Instrument{
+		Name: "heap.size",
+		Unit: "By",
+	})
+	fmt.Println("name:", stream.Name)
+	// Output:
+	// name: computation.time.ms
+	// name: heap.size.byte
+}
+
+func ExampleNewView() {
+	// Create a view that renames the "latency" instrument from the v0.34.0
+	// version of the "http" instrumentation library as "request.latency".
+	view := metric.NewView(metric.Instrument{
+		Name: "latency",
+		Scope: instrumentation.Scope{
+			Name:    "http",
+			Version: "0.34.0",
+		},
+	}, metric.Stream{Name: "request.latency"})
+
+	// The created view can then be registered with the OpenTelemetry metric
+	// SDK using the WithView option.
+	_ = metric.NewMeterProvider(
+		metric.WithView(view),
+	)
+
+	// Below is an example of how the view will
+	// function in the SDK for certain instruments.
+	stream, _ := view(metric.Instrument{
+		Name:        "latency",
+		Description: "request latency",
+		Unit:        "ms",
+		Kind:        metric.InstrumentKindCounter,
+		Scope: instrumentation.Scope{
+			Name:      "http",
+			Version:   "0.34.0",
+			SchemaURL: "https://opentelemetry.io/schemas/1.0.0",
+		},
+	})
+	fmt.Println("name:", stream.Name)
+	fmt.Println("description:", stream.Description)
+	fmt.Println("unit:", stream.Unit)
+	// Output:
+	// name: request.latency
+	// description: request latency
+	// unit: ms
+}
+
+func ExampleNewView_wildcard() {
+	// Create a view that sets unit to milliseconds for any instrument with a
+	// name suffix of ".ms".
+	view := metric.NewView(
+		metric.Instrument{Name: "*.ms"},
+		metric.Stream{Unit: "ms"},
+	)
+
+	// The created view can then be registered with the OpenTelemetry metric
+	// SDK using the WithView option.
+	_ = metric.NewMeterProvider(
+		metric.WithView(view),
+	)
+
+	// Below is an example of how the view will
+	// function in the SDK for certain instruments.
+	stream, _ := view(metric.Instrument{
+		Name: "computation.time.ms",
+		Unit: "1",
+	})
+	fmt.Println("name:", stream.Name)
+	fmt.Println("unit:", stream.Unit)
+	// Output:
+	// name: computation.time.ms
+	// unit: ms
+}
+
+func ExampleNewView_drop() {
+	// Create a view that drops the "latency" instrument from the "http"
+	// instrumentation library.
+	view := metric.NewView(
+		metric.Instrument{
+			Name:  "latency",
+			Scope: instrumentation.Scope{Name: "http"},
+		},
+		metric.Stream{Aggregation: metric.AggregationDrop{}},
+	)
+
+	// The created view can then be registered with the OpenTelemetry metric
+	// SDK using the WithView option.
+	_ = metric.NewMeterProvider(
+		metric.WithView(view),
+	)
+}
+
+func ExampleNewView_attributeFilter() {
+	// Create a view that removes the "http.request.method" attribute recorded
+	// by the "latency" instrument from the "http" instrumentation library.
+	view := metric.NewView(
+		metric.Instrument{
+			Name:  "latency",
+			Scope: instrumentation.Scope{Name: "http"},
+		},
+		metric.Stream{AttributeFilter: attribute.NewDenyKeysFilter("http.request.method")},
+	)
+
+	// The created view can then be registered with the OpenTelemetry metric
+	// SDK using the WithView option.
+	_ = metric.NewMeterProvider(
+		metric.WithView(view),
+	)
+}
+
+func ExampleNewView_exponentialHistogram() {
+	// Create a view that makes the "latency" instrument from the "http"
+	// instrumentation library to be reported as an exponential histogram.
+	view := metric.NewView(
+		metric.Instrument{
+			Name:  "latency",
+			Scope: instrumentation.Scope{Name: "http"},
+		},
+		metric.Stream{
+			Aggregation: metric.AggregationBase2ExponentialHistogram{
+				MaxSize:  160,
+				MaxScale: 20,
+			},
+		},
+	)
+
+	// The created view can then be registered with the OpenTelemetry metric
+	// SDK using the WithView option.
+	_ = metric.NewMeterProvider(
+		metric.WithView(view),
+	)
 }
diff --git a/sdk/metric/exporter.go b/sdk/metric/exporter.go
index 695cf466c0e..da8941b378d 100644
--- a/sdk/metric/exporter.go
+++ b/sdk/metric/exporter.go
@@ -33,12 +33,16 @@ type Exporter interface {
 	// This method needs to be concurrent safe with itself and all the other
 	// Exporter methods.
 	Temporality(InstrumentKind) metricdata.Temporality
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
 
 	// Aggregation returns the Aggregation to use for an instrument kind.
 	//
 	// This method needs to be concurrent safe with itself and all the other
 	// Exporter methods.
 	Aggregation(InstrumentKind) Aggregation
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
 
 	// Export serializes and transmits metric data to a receiver.
 	//
@@ -55,6 +59,8 @@ type Exporter interface {
 	// exporter needs to hold this data after it returns, it needs to make a
 	// copy.
 	Export(context.Context, *metricdata.ResourceMetrics) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
 
 	// ForceFlush flushes any metric data held by an exporter.
 	//
@@ -63,6 +69,8 @@ type Exporter interface {
 	//
 	// This method needs to be concurrent safe.
 	ForceFlush(context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
 
 	// Shutdown flushes all metric data held by an exporter and releases any
 	// held computational resources.
@@ -75,4 +83,6 @@ type Exporter interface {
 	//
 	// This method needs to be concurrent safe.
 	Shutdown(context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
 }
diff --git a/sdk/metric/go.mod b/sdk/metric/go.mod
index f7f92f4ecc2..24325464289 100644
--- a/sdk/metric/go.mod
+++ b/sdk/metric/go.mod
@@ -1,21 +1,21 @@
 module go.opentelemetry.io/otel/sdk/metric
 
-go 1.19
+go 1.20
 
 require (
-	github.com/go-logr/logr v1.2.4
+	github.com/go-logr/logr v1.3.0
 	github.com/go-logr/stdr v1.2.2
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
-	go.opentelemetry.io/otel/metric v1.16.0
-	go.opentelemetry.io/otel/sdk v1.16.0
+	go.opentelemetry.io/otel v1.21.0
+	go.opentelemetry.io/otel/metric v1.21.0
+	go.opentelemetry.io/otel/sdk v1.21.0
 )
 
 require (
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	go.opentelemetry.io/otel/trace v1.16.0 // indirect
-	golang.org/x/sys v0.11.0 // indirect
+	go.opentelemetry.io/otel/trace v1.21.0 // indirect
+	golang.org/x/sys v0.15.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
diff --git a/sdk/metric/go.sum b/sdk/metric/go.sum
index 594aa686923..94020957893 100644
--- a/sdk/metric/go.sum
+++ b/sdk/metric/go.sum
@@ -1,17 +1,17 @@
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
-github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
+github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
diff --git a/sdk/metric/instrument.go b/sdk/metric/instrument.go
index c09c89361c6..bb52f6ec717 100644
--- a/sdk/metric/instrument.go
+++ b/sdk/metric/instrument.go
@@ -146,31 +146,14 @@ type Stream struct {
 	Unit string
 	// Aggregation the stream uses for an instrument.
 	Aggregation Aggregation
-	// AllowAttributeKeys are an allow-list of attribute keys that will be
-	// preserved for the stream. Any attribute recorded for the stream with a
-	// key not in this slice will be dropped.
+	// AttributeFilter is an attribute Filter applied to the attributes
+	// recorded for an instrument's measurement. If the filter returns false
+	// the attribute will not be recorded, otherwise, if it returns true, it
+	// will record the attribute.
 	//
-	// If this slice is empty, all attributes will be kept.
-	AllowAttributeKeys []attribute.Key
-}
-
-// attributeFilter returns an attribute.Filter that only allows attributes
-// with keys in s.AttributeKeys.
-//
-// If s.AttributeKeys is empty an accept-all filter is returned.
-func (s Stream) attributeFilter() attribute.Filter {
-	if len(s.AllowAttributeKeys) <= 0 {
-		return func(kv attribute.KeyValue) bool { return true }
-	}
-
-	allowed := make(map[attribute.Key]struct{})
-	for _, k := range s.AllowAttributeKeys {
-		allowed[k] = struct{}{}
-	}
-	return func(kv attribute.KeyValue) bool {
-		_, ok := allowed[kv.Key]
-		return ok
-	}
+	// Use NewAllowKeysFilter from "go.opentelemetry.io/otel/attribute" to
+	// provide an allow-list of attribute keys here.
+	AttributeFilter attribute.Filter
 }
 
 // instID are the identifying properties of a instrument.
@@ -205,9 +188,11 @@ type int64Inst struct {
 	embedded.Int64Histogram
 }
 
-var _ metric.Int64Counter = (*int64Inst)(nil)
-var _ metric.Int64UpDownCounter = (*int64Inst)(nil)
-var _ metric.Int64Histogram = (*int64Inst)(nil)
+var (
+	_ metric.Int64Counter       = (*int64Inst)(nil)
+	_ metric.Int64UpDownCounter = (*int64Inst)(nil)
+	_ metric.Int64Histogram     = (*int64Inst)(nil)
+)
 
 func (i *int64Inst) Add(ctx context.Context, val int64, opts ...metric.AddOption) {
 	c := metric.NewAddConfig(opts)
@@ -236,9 +221,11 @@ type float64Inst struct {
 	embedded.Float64Histogram
 }
 
-var _ metric.Float64Counter = (*float64Inst)(nil)
-var _ metric.Float64UpDownCounter = (*float64Inst)(nil)
-var _ metric.Float64Histogram = (*float64Inst)(nil)
+var (
+	_ metric.Float64Counter       = (*float64Inst)(nil)
+	_ metric.Float64UpDownCounter = (*float64Inst)(nil)
+	_ metric.Float64Histogram     = (*float64Inst)(nil)
+)
 
 func (i *float64Inst) Add(ctx context.Context, val float64, opts ...metric.AddOption) {
 	c := metric.NewAddConfig(opts)
@@ -277,9 +264,11 @@ type float64Observable struct {
 	embedded.Float64ObservableGauge
 }
 
-var _ metric.Float64ObservableCounter = float64Observable{}
-var _ metric.Float64ObservableUpDownCounter = float64Observable{}
-var _ metric.Float64ObservableGauge = float64Observable{}
+var (
+	_ metric.Float64ObservableCounter       = float64Observable{}
+	_ metric.Float64ObservableUpDownCounter = float64Observable{}
+	_ metric.Float64ObservableGauge         = float64Observable{}
+)
 
 func newFloat64Observable(m *meter, kind InstrumentKind, name, desc, u string, meas []aggregate.Measure[float64]) float64Observable {
 	return float64Observable{
@@ -296,9 +285,11 @@ type int64Observable struct {
 	embedded.Int64ObservableGauge
 }
 
-var _ metric.Int64ObservableCounter = int64Observable{}
-var _ metric.Int64ObservableUpDownCounter = int64Observable{}
-var _ metric.Int64ObservableGauge = int64Observable{}
+var (
+	_ metric.Int64ObservableCounter       = int64Observable{}
+	_ metric.Int64ObservableUpDownCounter = int64Observable{}
+	_ metric.Int64ObservableGauge         = int64Observable{}
+)
 
 func newInt64Observable(m *meter, kind InstrumentKind, name, desc, u string, meas []aggregate.Measure[int64]) int64Observable {
 	return int64Observable{
diff --git a/sdk/metric/internal/aggregate/exponential_histogram.go b/sdk/metric/internal/aggregate/exponential_histogram.go
index 006e03c5ddc..91099b32d89 100644
--- a/sdk/metric/internal/aggregate/exponential_histogram.go
+++ b/sdk/metric/internal/aggregate/exponential_histogram.go
@@ -113,7 +113,7 @@ func (p *expoHistogramDataPoint[N]) record(v N) {
 			otel.Handle(errors.New("exponential histogram scale underflow"))
 			return
 		}
-		//Downscale
+		// Downscale
 		p.scale -= scaleDelta
 		p.posBuckets.downscale(scaleDelta)
 		p.negBuckets.downscale(scaleDelta)
diff --git a/sdk/metric/internal/aggregate/exponential_histogram_test.go b/sdk/metric/internal/aggregate/exponential_histogram_test.go
index 5d7e534ca56..8442881f4e0 100644
--- a/sdk/metric/internal/aggregate/exponential_histogram_test.go
+++ b/sdk/metric/internal/aggregate/exponential_histogram_test.go
@@ -51,16 +51,13 @@ func TestExpoHistogramDataPointRecord(t *testing.T) {
 	t.Run("int64 MinMaxSum", testExpoHistogramMinMaxSumInt64)
 }
 
-// TODO: This can be defined in the test after we drop support for go1.19.
-type expoHistogramDataPointRecordTestCase[N int64 | float64] struct {
-	maxSize         int
-	values          []N
-	expectedBuckets expoBuckets
-	expectedScale   int
-}
-
 func testExpoHistogramDataPointRecord[N int64 | float64](t *testing.T) {
-	testCases := []expoHistogramDataPointRecordTestCase[N]{
+	testCases := []struct {
+		maxSize         int
+		values          []N
+		expectedBuckets expoBuckets
+		expectedScale   int
+	}{
 		{
 			maxSize: 4,
 			values:  []N{2, 4, 1},
@@ -679,7 +676,7 @@ func BenchmarkPrepend(b *testing.B) {
 
 func BenchmarkAppend(b *testing.B) {
 	for i := 0; i < b.N; i++ {
-		agg := newExpoHistogramDataPoint[float64](1024, 200, false, false)
+		agg := newExpoHistogramDataPoint[float64](1024, 20, false, false)
 		n := smallestNonZeroNormalFloat64
 		for j := 0; j < 1024; j++ {
 			agg.record(n)
@@ -746,15 +743,6 @@ func TestExponentialHistogramAggregation(t *testing.T) {
 	t.Run("Float64", testExponentialHistogramAggregation[float64])
 }
 
-// TODO: This can be defined in the test after we drop support for go1.19.
-type exponentialHistogramAggregationTestCase[N int64 | float64] struct {
-	name      string
-	build     func() (Measure[N], ComputeAggregation)
-	input     [][]N
-	want      metricdata.ExponentialHistogram[N]
-	wantCount int
-}
-
 func testExponentialHistogramAggregation[N int64 | float64](t *testing.T) {
 	const (
 		maxSize  = 4
@@ -763,7 +751,13 @@ func testExponentialHistogramAggregation[N int64 | float64](t *testing.T) {
 		noSum    = false
 	)
 
-	tests := []exponentialHistogramAggregationTestCase[N]{
+	tests := []struct {
+		name      string
+		build     func() (Measure[N], ComputeAggregation)
+		input     [][]N
+		want      metricdata.ExponentialHistogram[N]
+		wantCount int
+	}{
 		{
 			name: "Delta Single",
 			build: func() (Measure[N], ComputeAggregation) {
diff --git a/sdk/metric/internal/x/x.go b/sdk/metric/internal/x/x.go
index fea3a5805de..2891395725d 100644
--- a/sdk/metric/internal/x/x.go
+++ b/sdk/metric/internal/x/x.go
@@ -13,47 +13,81 @@
 // limitations under the License.
 
 // Package x contains support for OTel metric SDK experimental features.
+//
+// This package should only be used for features defined in the specification.
+// It should not be used for experiments or new project ideas.
 package x // import "go.opentelemetry.io/otel/sdk/metric/internal/x"
 
 import (
 	"os"
+	"strconv"
 	"strings"
 )
 
-const EnvKeyRoot = "OTEL_GO_X_"
-
 var (
-	CardinalityLimit = Feature{
-		EnvKeySuffix: "CARDINALITY_LIMIT",
-		// TODO: support accepting number values here to set the cardinality
-		// limit.
-		EnablementVals: []string{"true"},
-	}
+	// Exemplars is an experimental feature flag that defines if exemplars
+	// should be recorded for metric data-points.
+	//
+	// To enable this feature set the OTEL_GO_X_EXEMPLAR environment variable
+	// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+	// will also enable this).
+	Exemplars = newFeature("EXEMPLAR", func(v string) (string, bool) {
+		if strings.ToLower(v) == "true" {
+			return v, true
+		}
+		return "", false
+	})
+
+	// CardinalityLimit is an experimental feature flag that defines if
+	// cardinality limits should be applied to the recorded metric data-points.
+	//
+	// To enable this feature set the OTEL_GO_X_CARDINALITY_LIMIT environment
+	// variable to the integer limit value you want to use.
+	CardinalityLimit = newFeature("CARDINALITY_LIMIT", func(v string) (int, bool) {
+		n, err := strconv.Atoi(v)
+		if err != nil {
+			return 0, false
+		}
+		return n, true
+	})
 )
 
-type Feature struct {
-	// EnvKeySuffix is the environment variable key suffix the xFeature is
-	// stored at. It is assumed EnvKeyRoot is the base of the environment
-	// variable key.
-	EnvKeySuffix string
-	// EnablementVals are the case-insensitive comparison values that indicate
-	// the Feature is enabled.
-	EnablementVals []string
+// Feature is an experimental feature control flag. It provides a uniform way
+// to interact with these feature flags and parse their values.
+type Feature[T any] struct {
+	key   string
+	parse func(v string) (T, bool)
 }
 
-// Enabled returns if the Feature is enabled.
-func Enabled(f Feature) bool {
-	key := EnvKeyRoot + f.EnvKeySuffix
-	vRaw, present := os.LookupEnv(key)
-	if !present {
-		return false
+func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+	const envKeyRoot = "OTEL_GO_X_"
+	return Feature[T]{
+		key:   envKeyRoot + suffix,
+		parse: parse,
 	}
+}
 
-	v := strings.ToLower(vRaw)
-	for _, allowed := range f.EnablementVals {
-		if v == strings.ToLower(allowed) {
-			return true
-		}
+// Key returns the environment variable key that needs to be set to enable the
+// feature.
+func (f Feature[T]) Key() string { return f.key }
+
+// Lookup returns the user configured value for the feature and true if the
+// user has enabled the feature. Otherwise, if the feature is not enabled, a
+// zero-value and false are returned.
+func (f Feature[T]) Lookup() (v T, ok bool) {
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
+	//
+	// > The SDK MUST interpret an empty value of an environment variable the
+	// > same way as when the variable is unset.
+	vRaw := os.Getenv(f.key)
+	if vRaw == "" {
+		return v, ok
 	}
-	return false
+	return f.parse(vRaw)
+}
+
+// Enabled returns if the feature is enabled.
+func (f Feature[T]) Enabled() bool {
+	_, ok := f.Lookup()
+	return ok
 }
diff --git a/sdk/metric/internal/x/x_test.go b/sdk/metric/internal/x/x_test.go
new file mode 100644
index 00000000000..b643fe265ff
--- /dev/null
+++ b/sdk/metric/internal/x/x_test.go
@@ -0,0 +1,81 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package x
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestExemplars(t *testing.T) {
+	const key = "OTEL_GO_X_EXEMPLAR"
+	require.Equal(t, key, Exemplars.Key())
+
+	t.Run("true", run(setenv(key, "true"), assertEnabled(Exemplars, "true")))
+	t.Run("True", run(setenv(key, "True"), assertEnabled(Exemplars, "True")))
+	t.Run("TRUE", run(setenv(key, "TRUE"), assertEnabled(Exemplars, "TRUE")))
+	t.Run("false", run(setenv(key, "false"), assertDisabled(Exemplars)))
+	t.Run("1", run(setenv(key, "1"), assertDisabled(Exemplars)))
+	t.Run("empty", run(assertDisabled(Exemplars)))
+}
+
+func TestCardinalityLimit(t *testing.T) {
+	const key = "OTEL_GO_X_CARDINALITY_LIMIT"
+	require.Equal(t, key, CardinalityLimit.Key())
+
+	t.Run("100", run(setenv(key, "100"), assertEnabled(CardinalityLimit, 100)))
+	t.Run("-1", run(setenv(key, "-1"), assertEnabled(CardinalityLimit, -1)))
+	t.Run("false", run(setenv(key, "false"), assertDisabled(CardinalityLimit)))
+	t.Run("empty", run(assertDisabled(CardinalityLimit)))
+}
+
+func run(steps ...func(*testing.T)) func(*testing.T) {
+	return func(t *testing.T) {
+		t.Helper()
+		for _, step := range steps {
+			step(t)
+		}
+	}
+}
+
+func setenv(k, v string) func(t *testing.T) {
+	return func(t *testing.T) { t.Setenv(k, v) }
+}
+
+func assertEnabled[T any](f Feature[T], want T) func(*testing.T) {
+	return func(t *testing.T) {
+		t.Helper()
+		assert.True(t, f.Enabled(), "not enabled")
+
+		v, ok := f.Lookup()
+		assert.True(t, ok, "Lookup state")
+		assert.Equal(t, want, v, "Lookup value")
+	}
+}
+
+func assertDisabled[T any](f Feature[T]) func(*testing.T) {
+	var zero T
+	return func(t *testing.T) {
+		t.Helper()
+
+		assert.False(t, f.Enabled(), "enabled")
+
+		v, ok := f.Lookup()
+		assert.False(t, ok, "Lookup state")
+		assert.Equal(t, zero, v, "Lookup value")
+	}
+}
diff --git a/sdk/metric/limit.go b/sdk/metric/limit.go
index 589f80c4077..0fb5be74aba 100644
--- a/sdk/metric/limit.go
+++ b/sdk/metric/limit.go
@@ -17,12 +17,10 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
 import "go.opentelemetry.io/otel/sdk/metric/internal/x"
 
 func cardinalityLimit() int {
-	if !x.Enabled(x.CardinalityLimit) {
-		return 0
+	if v, ok := x.CardinalityLimit.Lookup(); ok {
+		return v
 	}
 
-	// TODO: make this configurable.
-
 	// Default 2000.
 	return 2000
 }
diff --git a/sdk/metric/manual_reader_test.go b/sdk/metric/manual_reader_test.go
index 210a66c1bbe..a2a1476fa83 100644
--- a/sdk/metric/manual_reader_test.go
+++ b/sdk/metric/manual_reader_test.go
@@ -40,8 +40,10 @@ func BenchmarkManualReader(b *testing.B) {
 	b.Run("Collect", benchReaderCollectFunc(NewManualReader()))
 }
 
-var deltaTemporalitySelector = func(InstrumentKind) metricdata.Temporality { return metricdata.DeltaTemporality }
-var cumulativeTemporalitySelector = func(InstrumentKind) metricdata.Temporality { return metricdata.CumulativeTemporality }
+var (
+	deltaTemporalitySelector      = func(InstrumentKind) metricdata.Temporality { return metricdata.DeltaTemporality }
+	cumulativeTemporalitySelector = func(InstrumentKind) metricdata.Temporality { return metricdata.CumulativeTemporality }
+)
 
 func TestManualReaderTemporality(t *testing.T) {
 	tests := []struct {
diff --git a/sdk/metric/meter.go b/sdk/metric/meter.go
index 9d2de67c594..7f51ec512ad 100644
--- a/sdk/metric/meter.go
+++ b/sdk/metric/meter.go
@@ -26,11 +26,9 @@ import (
 	"go.opentelemetry.io/otel/sdk/metric/internal/aggregate"
 )
 
-var (
-	// ErrInstrumentName indicates the created instrument has an invalid name.
-	// Valid names must consist of 255 or fewer characters including alphanumeric, _, ., -, and start with a letter.
-	ErrInstrumentName = errors.New("invalid instrument name")
-)
+// ErrInstrumentName indicates the created instrument has an invalid name.
+// Valid names must consist of 255 or fewer characters including alphanumeric, _, ., -, / and start with a letter.
+var ErrInstrumentName = errors.New("invalid instrument name")
 
 // meter handles the creation and coordination of all metric instruments. A
 // meter represents a single instrumentation scope; all metric telemetry
@@ -97,9 +95,8 @@ func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCou
 // distribution of int64 measurements during a computational operation.
 func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
 	cfg := metric.NewInt64HistogramConfig(options...)
-	const kind = InstrumentKindHistogram
 	p := int64InstProvider{m}
-	i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+	i, err := p.lookupHistogram(name, cfg)
 	if err != nil {
 		return i, err
 	}
@@ -190,9 +187,8 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDow
 // distribution of float64 measurements during a computational operation.
 func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
 	cfg := metric.NewFloat64HistogramConfig(options...)
-	const kind = InstrumentKindHistogram
 	p := float64InstProvider{m}
-	i, err := p.lookup(kind, name, cfg.Description(), cfg.Unit())
+	i, err := p.lookupHistogram(name, cfg)
 	if err != nil {
 		return i, err
 	}
@@ -262,15 +258,17 @@ func validateInstrumentName(name string) error {
 		return nil
 	}
 	for _, c := range name[1:] {
-		if !isAlphanumeric(c) && c != '_' && c != '.' && c != '-' {
-			return fmt.Errorf("%w: %s: must only contain [A-Za-z0-9_.-]", ErrInstrumentName, name)
+		if !isAlphanumeric(c) && c != '_' && c != '.' && c != '-' && c != '/' {
+			return fmt.Errorf("%w: %s: must only contain [A-Za-z0-9_.-/]", ErrInstrumentName, name)
 		}
 	}
 	return nil
 }
+
 func isAlpha(c rune) bool {
 	return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z')
 }
+
 func isAlphanumeric(c rune) bool {
 	return isAlpha(c) || ('0' <= c && c <= '9')
 }
@@ -456,12 +454,36 @@ func (p int64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]ag
 	return p.int64Resolver.Aggregators(inst)
 }
 
+func (p int64InstProvider) histogramAggs(name string, cfg metric.Int64HistogramConfig) ([]aggregate.Measure[int64], error) {
+	boundaries := cfg.ExplicitBucketBoundaries()
+	aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err()
+	if aggError != nil {
+		// If boundaries are invalid, ignore them.
+		boundaries = nil
+	}
+	inst := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindHistogram,
+		Scope:       p.scope,
+	}
+	measures, err := p.int64Resolver.HistogramAggregators(inst, boundaries)
+	return measures, errors.Join(aggError, err)
+}
+
 // lookup returns the resolved instrumentImpl.
 func (p int64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*int64Inst, error) {
 	aggs, err := p.aggs(kind, name, desc, u)
 	return &int64Inst{measures: aggs}, err
 }
 
+// lookupHistogram returns the resolved instrumentImpl.
+func (p int64InstProvider) lookupHistogram(name string, cfg metric.Int64HistogramConfig) (*int64Inst, error) {
+	aggs, err := p.histogramAggs(name, cfg)
+	return &int64Inst{measures: aggs}, err
+}
+
 // float64InstProvider provides float64 OpenTelemetry instruments.
 type float64InstProvider struct{ *meter }
 
@@ -476,12 +498,36 @@ func (p float64InstProvider) aggs(kind InstrumentKind, name, desc, u string) ([]
 	return p.float64Resolver.Aggregators(inst)
 }
 
+func (p float64InstProvider) histogramAggs(name string, cfg metric.Float64HistogramConfig) ([]aggregate.Measure[float64], error) {
+	boundaries := cfg.ExplicitBucketBoundaries()
+	aggError := AggregationExplicitBucketHistogram{Boundaries: boundaries}.err()
+	if aggError != nil {
+		// If boundaries are invalid, ignore them.
+		boundaries = nil
+	}
+	inst := Instrument{
+		Name:        name,
+		Description: cfg.Description(),
+		Unit:        cfg.Unit(),
+		Kind:        InstrumentKindHistogram,
+		Scope:       p.scope,
+	}
+	measures, err := p.float64Resolver.HistogramAggregators(inst, boundaries)
+	return measures, errors.Join(aggError, err)
+}
+
 // lookup returns the resolved instrumentImpl.
 func (p float64InstProvider) lookup(kind InstrumentKind, name, desc, u string) (*float64Inst, error) {
 	aggs, err := p.aggs(kind, name, desc, u)
 	return &float64Inst{measures: aggs}, err
 }
 
+// lookupHistogram returns the resolved instrumentImpl.
+func (p float64InstProvider) lookupHistogram(name string, cfg metric.Float64HistogramConfig) (*float64Inst, error) {
+	aggs, err := p.histogramAggs(name, cfg)
+	return &float64Inst{measures: aggs}, err
+}
+
 type int64ObservProvider struct{ *meter }
 
 func (p int64ObservProvider) lookup(kind InstrumentKind, name, desc, u string) (int64Observable, error) {
diff --git a/sdk/metric/meter_test.go b/sdk/metric/meter_test.go
index edb1a400b2d..0e082a7be06 100644
--- a/sdk/metric/meter_test.go
+++ b/sdk/metric/meter_test.go
@@ -16,6 +16,7 @@ package metric
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"strings"
 	"sync"
@@ -550,6 +551,17 @@ func TestMeterCreatesInstrumentsValidations(t *testing.T) {
 
 			wantErr: fmt.Errorf("%w: _: must start with a letter", ErrInstrumentName),
 		},
+		{
+			name: "Int64Histogram with invalid buckets",
+
+			fn: func(t *testing.T, m metric.Meter) error {
+				i, err := m.Int64Histogram("histogram", metric.WithExplicitBucketBoundaries(-1, 1, -5))
+				assert.NotNil(t, i)
+				return err
+			},
+
+			wantErr: errors.Join(fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, []float64{-1, 1, -5})),
+		},
 		{
 			name: "Int64ObservableCounter with no validation issues",
 
@@ -670,6 +682,17 @@ func TestMeterCreatesInstrumentsValidations(t *testing.T) {
 
 			wantErr: fmt.Errorf("%w: _: must start with a letter", ErrInstrumentName),
 		},
+		{
+			name: "Float64Histogram with invalid buckets",
+
+			fn: func(t *testing.T, m metric.Meter) error {
+				i, err := m.Float64Histogram("histogram", metric.WithExplicitBucketBoundaries(-1, 1, -5))
+				assert.NotNil(t, i)
+				return err
+			},
+
+			wantErr: errors.Join(fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, []float64{-1, 1, -5})),
+		},
 		{
 			name: "Float64ObservableCounter with no validation issues",
 
@@ -775,9 +798,12 @@ func TestValidateInstrumentName(t *testing.T) {
 		{
 			name: "nam.",
 		},
+		{
+			name: "nam/e",
+		},
 		{
 			name:    "name!",
-			wantErr: fmt.Errorf("%w: name!: must only contain [A-Za-z0-9_.-]", ErrInstrumentName),
+			wantErr: fmt.Errorf("%w: name!: must only contain [A-Za-z0-9_.-/]", ErrInstrumentName),
 		},
 		{
 			name:    longName,
@@ -1518,7 +1544,7 @@ func testAttributeFilter(temporality metricdata.Temporality) func(*testing.T) {
 					WithReader(rdr),
 					WithView(NewView(
 						Instrument{Name: "*"},
-						Stream{AllowAttributeKeys: []attribute.Key{"foo"}},
+						Stream{AttributeFilter: attribute.NewAllowKeysFilter("foo")},
 					)),
 				).Meter("TestAttributeFilter")
 				require.NoError(t, tt.register(t, mtr))
@@ -1565,8 +1591,11 @@ func TestObservableExample(t *testing.T) {
 		selector := func(InstrumentKind) metricdata.Temporality { return temp }
 		reader := NewManualReader(WithTemporalitySelector(selector))
 
-		noFiltered := NewView(Instrument{Name: instName}, Stream{Name: instName})
-		filtered := NewView(Instrument{Name: instName}, Stream{Name: filteredStream, AllowAttributeKeys: []attribute.Key{"pid"}})
+		allowAll := attribute.NewDenyKeysFilter()
+		noFiltered := NewView(Instrument{Name: instName}, Stream{Name: instName, AttributeFilter: allowAll})
+
+		filter := attribute.NewDenyKeysFilter("tid")
+		filtered := NewView(Instrument{Name: instName}, Stream{Name: filteredStream, AttributeFilter: filter})
 
 		mp := NewMeterProvider(WithReader(reader), WithView(noFiltered, filtered))
 		meter := mp.Meter(scopeName)
@@ -1820,12 +1849,15 @@ func BenchmarkInstrumentCreation(b *testing.B) {
 func testNilAggregationSelector(InstrumentKind) Aggregation {
 	return nil
 }
+
 func testDefaultAggregationSelector(InstrumentKind) Aggregation {
 	return AggregationDefault{}
 }
+
 func testUndefinedTemporalitySelector(InstrumentKind) metricdata.Temporality {
 	return metricdata.Temporality(0)
 }
+
 func testInvalidTemporalitySelector(InstrumentKind) metricdata.Temporality {
 	return metricdata.Temporality(255)
 }
@@ -1961,3 +1993,63 @@ func TestMalformedSelectors(t *testing.T) {
 		})
 	}
 }
+
+func TestHistogramBucketPrecedenceOrdering(t *testing.T) {
+	defaultBuckets := []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}
+	aggregationSelector := func(InstrumentKind) Aggregation {
+		return AggregationExplicitBucketHistogram{Boundaries: []float64{0, 1, 2, 3, 4, 5}}
+	}
+	for _, tt := range []struct {
+		desc                     string
+		reader                   Reader
+		views                    []View
+		histogramOpts            []metric.Float64HistogramOption
+		expectedBucketBoundaries []float64
+	}{
+		{
+			desc:                     "default",
+			reader:                   NewManualReader(),
+			expectedBucketBoundaries: defaultBuckets,
+		},
+		{
+			desc:                     "custom reader aggregation overrides default",
+			reader:                   NewManualReader(WithAggregationSelector(aggregationSelector)),
+			expectedBucketBoundaries: []float64{0, 1, 2, 3, 4, 5},
+		},
+		{
+			desc:   "overridden by histogram option",
+			reader: NewManualReader(WithAggregationSelector(aggregationSelector)),
+			histogramOpts: []metric.Float64HistogramOption{
+				metric.WithExplicitBucketBoundaries(0, 2, 4, 6, 8, 10),
+			},
+			expectedBucketBoundaries: []float64{0, 2, 4, 6, 8, 10},
+		},
+		{
+			desc:   "overridden by view",
+			reader: NewManualReader(WithAggregationSelector(aggregationSelector)),
+			histogramOpts: []metric.Float64HistogramOption{
+				metric.WithExplicitBucketBoundaries(0, 2, 4, 6, 8, 10),
+			},
+			views: []View{NewView(Instrument{Name: "*"}, Stream{
+				Aggregation: AggregationExplicitBucketHistogram{Boundaries: []float64{0, 3, 6, 9, 12, 15}},
+			})},
+			expectedBucketBoundaries: []float64{0, 3, 6, 9, 12, 15},
+		},
+	} {
+		t.Run(tt.desc, func(t *testing.T) {
+			meter := NewMeterProvider(WithView(tt.views...), WithReader(tt.reader)).Meter("TestHistogramBucketPrecedenceOrdering")
+			sfHistogram, err := meter.Float64Histogram("sync.float64.histogram", tt.histogramOpts...)
+			require.NoError(t, err)
+			sfHistogram.Record(context.Background(), 1)
+			var rm metricdata.ResourceMetrics
+			err = tt.reader.Collect(context.Background(), &rm)
+			require.NoError(t, err)
+			require.Len(t, rm.ScopeMetrics, 1)
+			require.Len(t, rm.ScopeMetrics[0].Metrics, 1)
+			gotHist, ok := rm.ScopeMetrics[0].Metrics[0].Data.(metricdata.Histogram[float64])
+			require.True(t, ok)
+			require.Len(t, gotHist.DataPoints, 1)
+			assert.Equal(t, tt.expectedBucketBoundaries, gotHist.DataPoints[0].Bounds)
+		})
+	}
+}
diff --git a/sdk/metric/metricdata/data.go b/sdk/metric/metricdata/data.go
index 49bbc0414a2..995d42b38f1 100644
--- a/sdk/metric/metricdata/data.go
+++ b/sdk/metric/metricdata/data.go
@@ -240,3 +240,54 @@ type Exemplar[N int64 | float64] struct {
 	// be empty.
 	TraceID []byte `json:",omitempty"`
 }
+
+// Summary metric data are used to convey quantile summaries,
+// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
+// data type.
+//
+// These data points cannot always be merged in a meaningful way. The Summary
+// type is only used by bridges from other metrics libraries, and cannot be
+// produced using OpenTelemetry instrumentation.
+type Summary struct {
+	// DataPoints are the individual aggregated measurements with unique
+	// attributes.
+	DataPoints []SummaryDataPoint
+}
+
+func (Summary) privateAggregation() {}
+
+// SummaryDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a Summary metric.
+type SummaryDataPoint struct {
+	// Attributes is the set of key value pairs that uniquely identify the
+	// timeseries.
+	Attributes attribute.Set
+
+	// StartTime is when the timeseries was started.
+	StartTime time.Time
+	// Time is the time when the timeseries was recorded.
+	Time time.Time
+
+	// Count is the number of updates this summary has been calculated with.
+	Count uint64
+
+	// Sum is the sum of the values recorded.
+	Sum float64
+
+	// (Optional) list of values at different quantiles of the distribution calculated
+	// from the current snapshot. The quantiles must be strictly increasing.
+	QuantileValues []QuantileValue
+}
+
+// QuantileValue is the value at a given quantile of a summary.
+type QuantileValue struct {
+	// Quantile is the quantile of this value.
+	//
+	// Must be in the interval [0.0, 1.0].
+	Quantile float64
+
+	// Value is the value at the given quantile of a summary.
+	//
+	// Quantile values must NOT be negative.
+	Value float64
+}
diff --git a/sdk/metric/metricdata/metricdatatest/assertion.go b/sdk/metric/metricdata/metricdatatest/assertion.go
index f559a6432e8..a65fd99f482 100644
--- a/sdk/metric/metricdata/metricdatatest/assertion.go
+++ b/sdk/metric/metricdata/metricdatatest/assertion.go
@@ -18,7 +18,6 @@ package metricdatatest // import "go.opentelemetry.io/otel/sdk/metric/metricdata
 
 import (
 	"fmt"
-	"testing"
 
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
@@ -47,15 +46,33 @@ type Datatypes interface {
 		metricdata.ExponentialHistogram[int64] |
 		metricdata.ExponentialHistogramDataPoint[float64] |
 		metricdata.ExponentialHistogramDataPoint[int64] |
-		metricdata.ExponentialBucket
+		metricdata.ExponentialBucket |
+		metricdata.Summary |
+		metricdata.SummaryDataPoint |
+		metricdata.QuantileValue
 
 	// Interface types are not allowed in union types, therefore the
 	// Aggregation and Value type from metricdata are not included here.
 }
 
+// TestingT is an interface that implements [testing.T], but without the
+// private method of [testing.TB], so other testing packages can rely on it as
+// well.
+// The methods in this interface must match the [testing.TB] interface.
+type TestingT interface {
+	Helper()
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	Error(...any)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
 type config struct {
 	ignoreTimestamp bool
 	ignoreExemplars bool
+	ignoreValue     bool
 }
 
 func newConfig(opts []Option) config {
@@ -93,9 +110,23 @@ func IgnoreExemplars() Option {
 	})
 }
 
+// IgnoreValue disables checking if values are different. This can be
+// useful for non-deterministic values, like measured durations.
+//
+// This will ignore the value and trace information for Exemplars;
+// the buckets, zero count, scale, sum, max, min, and counts of
+// ExponentialHistogramDataPoints; the buckets, sum, count, max,
+// and min of HistogramDataPoints; the value of DataPoints.
+func IgnoreValue() Option {
+	return fnOption(func(cfg config) config {
+		cfg.ignoreValue = true
+		return cfg
+	})
+}
+
 // AssertEqual asserts that the two concrete data-types from the metricdata
 // package are equal.
-func AssertEqual[T Datatypes](t *testing.T, expected, actual T, opts ...Option) bool {
+func AssertEqual[T Datatypes](t TestingT, expected, actual T, opts ...Option) bool {
 	t.Helper()
 
 	cfg := newConfig(opts)
@@ -149,6 +180,12 @@ func AssertEqual[T Datatypes](t *testing.T, expected, actual T, opts ...Option)
 		r = equalExponentialHistogramDataPoints(e, aIface.(metricdata.ExponentialHistogramDataPoint[int64]), cfg)
 	case metricdata.ExponentialBucket:
 		r = equalExponentialBuckets(e, aIface.(metricdata.ExponentialBucket), cfg)
+	case metricdata.Summary:
+		r = equalSummary(e, aIface.(metricdata.Summary), cfg)
+	case metricdata.SummaryDataPoint:
+		r = equalSummaryDataPoint(e, aIface.(metricdata.SummaryDataPoint), cfg)
+	case metricdata.QuantileValue:
+		r = equalQuantileValue(e, aIface.(metricdata.QuantileValue), cfg)
 	default:
 		// We control all types passed to this, panic to signal developers
 		// early they changed things in an incompatible way.
@@ -163,7 +200,7 @@ func AssertEqual[T Datatypes](t *testing.T, expected, actual T, opts ...Option)
 }
 
 // AssertAggregationsEqual asserts that two Aggregations are equal.
-func AssertAggregationsEqual(t *testing.T, expected, actual metricdata.Aggregation, opts ...Option) bool {
+func AssertAggregationsEqual(t TestingT, expected, actual metricdata.Aggregation, opts ...Option) bool {
 	t.Helper()
 
 	cfg := newConfig(opts)
@@ -175,7 +212,7 @@ func AssertAggregationsEqual(t *testing.T, expected, actual metricdata.Aggregati
 }
 
 // AssertHasAttributes asserts that all Datapoints or HistogramDataPoints have all passed attrs.
-func AssertHasAttributes[T Datatypes](t *testing.T, actual T, attrs ...attribute.KeyValue) bool {
+func AssertHasAttributes[T Datatypes](t TestingT, actual T, attrs ...attribute.KeyValue) bool {
 	t.Helper()
 
 	var reasons []string
@@ -223,6 +260,12 @@ func AssertHasAttributes[T Datatypes](t *testing.T, actual T, attrs ...attribute
 		reasons = hasAttributesExponentialHistogramDataPoints(e, attrs...)
 	case metricdata.ExponentialBucket:
 		// Nothing to check.
+	case metricdata.Summary:
+		reasons = hasAttributesSummary(e, attrs...)
+	case metricdata.SummaryDataPoint:
+		reasons = hasAttributesSummaryDataPoint(e, attrs...)
+	case metricdata.QuantileValue:
+		// Nothing to check.
 	default:
 		// We control all types passed to this, panic to signal developers
 		// early they changed things in an incompatible way.
diff --git a/sdk/metric/metricdata/metricdatatest/assertion_test.go b/sdk/metric/metricdata/metricdatatest/assertion_test.go
index 3b9d8d6daa1..9d647a54004 100644
--- a/sdk/metric/metricdata/metricdatatest/assertion_test.go
+++ b/sdk/metric/metricdata/metricdatatest/assertion_test.go
@@ -85,6 +85,20 @@ var (
 		SpanID:             spanIDA,
 		TraceID:            traceIDA,
 	}
+	exemplarInt64D = metricdata.Exemplar[int64]{
+		FilteredAttributes: fltrAttrA,
+		Time:               endA,
+		Value:              12,
+		SpanID:             spanIDA,
+		TraceID:            traceIDA,
+	}
+	exemplarFloat64D = metricdata.Exemplar[float64]{
+		FilteredAttributes: fltrAttrA,
+		Time:               endA,
+		Value:              12.0,
+		SpanID:             spanIDA,
+		TraceID:            traceIDA,
+	}
 
 	dataPointInt64A = metricdata.DataPoint[int64]{
 		Attributes: attrA,
@@ -128,6 +142,20 @@ var (
 		Value:      -1.0,
 		Exemplars:  []metricdata.Exemplar[float64]{exemplarFloat64C},
 	}
+	dataPointInt64D = metricdata.DataPoint[int64]{
+		Attributes: attrA,
+		StartTime:  startA,
+		Time:       endA,
+		Value:      2,
+		Exemplars:  []metricdata.Exemplar[int64]{exemplarInt64A},
+	}
+	dataPointFloat64D = metricdata.DataPoint[float64]{
+		Attributes: attrA,
+		StartTime:  startA,
+		Time:       endA,
+		Value:      2.0,
+		Exemplars:  []metricdata.Exemplar[float64]{exemplarFloat64A},
+	}
 
 	minFloat64A              = metricdata.NewExtrema(-1.)
 	minInt64A                = metricdata.NewExtrema[int64](-1)
@@ -204,6 +232,69 @@ var (
 		Sum:          2,
 		Exemplars:    []metricdata.Exemplar[float64]{exemplarFloat64C},
 	}
+	histogramDataPointInt64D = metricdata.HistogramDataPoint[int64]{
+		Attributes:   attrA,
+		StartTime:    startA,
+		Time:         endA,
+		Count:        3,
+		Bounds:       []float64{0, 10, 100},
+		BucketCounts: []uint64{1, 1, 1},
+		Max:          maxInt64B,
+		Min:          minInt64B,
+		Sum:          3,
+		Exemplars:    []metricdata.Exemplar[int64]{exemplarInt64A},
+	}
+	histogramDataPointFloat64D = metricdata.HistogramDataPoint[float64]{
+		Attributes:   attrA,
+		StartTime:    startA,
+		Time:         endA,
+		Count:        3,
+		Bounds:       []float64{0, 10, 100},
+		BucketCounts: []uint64{1, 1, 1},
+		Max:          maxFloat64B,
+		Min:          minFloat64B,
+		Sum:          3,
+		Exemplars:    []metricdata.Exemplar[float64]{exemplarFloat64A},
+	}
+
+	quantileValueA = metricdata.QuantileValue{
+		Quantile: 0.0,
+		Value:    0.1,
+	}
+	quantileValueB = metricdata.QuantileValue{
+		Quantile: 0.1,
+		Value:    0.2,
+	}
+	summaryDataPointA = metricdata.SummaryDataPoint{
+		Attributes:     attrA,
+		StartTime:      startA,
+		Time:           endA,
+		Count:          2,
+		Sum:            3,
+		QuantileValues: []metricdata.QuantileValue{quantileValueA},
+	}
+	summaryDataPointB = metricdata.SummaryDataPoint{
+		Attributes:     attrB,
+		StartTime:      startB,
+		Time:           endB,
+		Count:          3,
+		QuantileValues: []metricdata.QuantileValue{quantileValueB},
+	}
+	summaryDataPointC = metricdata.SummaryDataPoint{
+		Attributes:     attrA,
+		StartTime:      startB,
+		Time:           endB,
+		Count:          2,
+		Sum:            3,
+		QuantileValues: []metricdata.QuantileValue{quantileValueA},
+	}
+	summaryDataPointD = metricdata.SummaryDataPoint{
+		Attributes:     attrA,
+		StartTime:      startA,
+		Time:           endA,
+		Count:          3,
+		QuantileValues: []metricdata.QuantileValue{quantileValueB},
+	}
 
 	exponentialBucket2 = metricdata.ExponentialBucket{
 		Offset: 2,
@@ -301,6 +392,34 @@ var (
 		NegativeBucket: exponentialBucket2,
 		Exemplars:      []metricdata.Exemplar[float64]{exemplarFloat64C},
 	}
+	exponentialHistogramDataPointInt64D = metricdata.ExponentialHistogramDataPoint[int64]{
+		Attributes:     attrA,
+		StartTime:      startA,
+		Time:           endA,
+		Count:          6,
+		Min:            minInt64B,
+		Max:            maxInt64B,
+		Sum:            3,
+		Scale:          2,
+		ZeroCount:      3,
+		PositiveBucket: exponentialBucket4,
+		NegativeBucket: exponentialBucket5,
+		Exemplars:      []metricdata.Exemplar[int64]{exemplarInt64A},
+	}
+	exponentialHistogramDataPointFloat64D = metricdata.ExponentialHistogramDataPoint[float64]{
+		Attributes:     attrA,
+		StartTime:      startA,
+		Time:           endA,
+		Count:          6,
+		Min:            minFloat64B,
+		Max:            maxFloat64B,
+		Sum:            3,
+		Scale:          2,
+		ZeroCount:      3,
+		PositiveBucket: exponentialBucket4,
+		NegativeBucket: exponentialBucket5,
+		Exemplars:      []metricdata.Exemplar[float64]{exemplarFloat64A},
+	}
 
 	gaugeInt64A = metricdata.Gauge[int64]{
 		DataPoints: []metricdata.DataPoint[int64]{dataPointInt64A},
@@ -320,6 +439,12 @@ var (
 	gaugeFloat64C = metricdata.Gauge[float64]{
 		DataPoints: []metricdata.DataPoint[float64]{dataPointFloat64C},
 	}
+	gaugeInt64D = metricdata.Gauge[int64]{
+		DataPoints: []metricdata.DataPoint[int64]{dataPointInt64D},
+	}
+	gaugeFloat64D = metricdata.Gauge[float64]{
+		DataPoints: []metricdata.DataPoint[float64]{dataPointFloat64D},
+	}
 
 	sumInt64A = metricdata.Sum[int64]{
 		Temporality: metricdata.CumulativeTemporality,
@@ -351,6 +476,16 @@ var (
 		IsMonotonic: true,
 		DataPoints:  []metricdata.DataPoint[float64]{dataPointFloat64C},
 	}
+	sumInt64D = metricdata.Sum[int64]{
+		Temporality: metricdata.CumulativeTemporality,
+		IsMonotonic: true,
+		DataPoints:  []metricdata.DataPoint[int64]{dataPointInt64D},
+	}
+	sumFloat64D = metricdata.Sum[float64]{
+		Temporality: metricdata.CumulativeTemporality,
+		IsMonotonic: true,
+		DataPoints:  []metricdata.DataPoint[float64]{dataPointFloat64D},
+	}
 
 	histogramInt64A = metricdata.Histogram[int64]{
 		Temporality: metricdata.CumulativeTemporality,
@@ -376,6 +511,14 @@ var (
 		Temporality: metricdata.CumulativeTemporality,
 		DataPoints:  []metricdata.HistogramDataPoint[float64]{histogramDataPointFloat64C},
 	}
+	histogramInt64D = metricdata.Histogram[int64]{
+		Temporality: metricdata.CumulativeTemporality,
+		DataPoints:  []metricdata.HistogramDataPoint[int64]{histogramDataPointInt64D},
+	}
+	histogramFloat64D = metricdata.Histogram[float64]{
+		Temporality: metricdata.CumulativeTemporality,
+		DataPoints:  []metricdata.HistogramDataPoint[float64]{histogramDataPointFloat64D},
+	}
 
 	exponentialHistogramInt64A = metricdata.ExponentialHistogram[int64]{
 		Temporality: metricdata.CumulativeTemporality,
@@ -401,6 +544,30 @@ var (
 		Temporality: metricdata.CumulativeTemporality,
 		DataPoints:  []metricdata.ExponentialHistogramDataPoint[float64]{exponentialHistogramDataPointFloat64C},
 	}
+	exponentialHistogramInt64D = metricdata.ExponentialHistogram[int64]{
+		Temporality: metricdata.CumulativeTemporality,
+		DataPoints:  []metricdata.ExponentialHistogramDataPoint[int64]{exponentialHistogramDataPointInt64D},
+	}
+	exponentialHistogramFloat64D = metricdata.ExponentialHistogram[float64]{
+		Temporality: metricdata.CumulativeTemporality,
+		DataPoints:  []metricdata.ExponentialHistogramDataPoint[float64]{exponentialHistogramDataPointFloat64D},
+	}
+
+	summaryA = metricdata.Summary{
+		DataPoints: []metricdata.SummaryDataPoint{summaryDataPointA},
+	}
+
+	summaryB = metricdata.Summary{
+		DataPoints: []metricdata.SummaryDataPoint{summaryDataPointB},
+	}
+
+	summaryC = metricdata.Summary{
+		DataPoints: []metricdata.SummaryDataPoint{summaryDataPointC},
+	}
+
+	summaryD = metricdata.Summary{
+		DataPoints: []metricdata.SummaryDataPoint{summaryDataPointD},
+	}
 
 	metricsA = metricdata.Metrics{
 		Name:        "A",
@@ -420,6 +587,12 @@ var (
 		Unit:        "1",
 		Data:        sumInt64C,
 	}
+	metricsD = metricdata.Metrics{
+		Name:        "A",
+		Description: "A desc",
+		Unit:        "1",
+		Data:        sumInt64D,
+	}
 
 	scopeMetricsA = metricdata.ScopeMetrics{
 		Scope:   instrumentation.Scope{Name: "A"},
@@ -433,6 +606,10 @@ var (
 		Scope:   instrumentation.Scope{Name: "A"},
 		Metrics: []metricdata.Metrics{metricsC},
 	}
+	scopeMetricsD = metricdata.ScopeMetrics{
+		Scope:   instrumentation.Scope{Name: "A"},
+		Metrics: []metricdata.Metrics{metricsD},
+	}
 
 	resourceMetricsA = metricdata.ResourceMetrics{
 		Resource:     resource.NewSchemaless(attribute.String("resource", "A")),
@@ -446,6 +623,10 @@ var (
 		Resource:     resource.NewSchemaless(attribute.String("resource", "A")),
 		ScopeMetrics: []metricdata.ScopeMetrics{scopeMetricsC},
 	}
+	resourceMetricsD = metricdata.ResourceMetrics{
+		Resource:     resource.NewSchemaless(attribute.String("resource", "A")),
+		ScopeMetrics: []metricdata.ScopeMetrics{scopeMetricsD},
+	}
 )
 
 type equalFunc[T Datatypes] func(T, T, config) []string
@@ -482,6 +663,21 @@ func testDatatypeIgnoreExemplars[T Datatypes](a, b T, f equalFunc[T]) func(*test
 	}
 }
 
+func testDatatypeIgnoreValue[T Datatypes](a, b T, f equalFunc[T]) func(*testing.T) {
+	return func(t *testing.T) {
+		AssertEqual(t, a, a)
+		AssertEqual(t, b, b)
+
+		c := newConfig([]Option{IgnoreValue()})
+		r := f(a, b, c)
+		assert.Len(t, r, 0, "unexpected inequality")
+	}
+}
+
+func TestTestingTImplementation(t *testing.T) {
+	assert.Implements(t, (*TestingT)(nil), t)
+}
+
 func TestAssertEqual(t *testing.T) {
 	t.Run("ResourceMetrics", testDatatype(resourceMetricsA, resourceMetricsB, equalResourceMetrics))
 	t.Run("ScopeMetrics", testDatatype(scopeMetricsA, scopeMetricsB, equalScopeMetrics))
@@ -505,6 +701,9 @@ func TestAssertEqual(t *testing.T) {
 	t.Run("ExponentialHistogramDataPointInt64", testDatatype(exponentialHistogramDataPointInt64A, exponentialHistogramDataPointInt64B, equalExponentialHistogramDataPoints[int64]))
 	t.Run("ExponentialHistogramDataPointFloat64", testDatatype(exponentialHistogramDataPointFloat64A, exponentialHistogramDataPointFloat64B, equalExponentialHistogramDataPoints[float64]))
 	t.Run("ExponentialBuckets", testDatatype(exponentialBucket2, exponentialBucket3, equalExponentialBuckets))
+	t.Run("Summary", testDatatype(summaryA, summaryB, equalSummary))
+	t.Run("SummaryDataPoint", testDatatype(summaryDataPointA, summaryDataPointB, equalSummaryDataPoint))
+	t.Run("QuantileValues", testDatatype(quantileValueA, quantileValueB, equalQuantileValue))
 }
 
 func TestAssertEqualIgnoreTime(t *testing.T) {
@@ -529,6 +728,8 @@ func TestAssertEqualIgnoreTime(t *testing.T) {
 	t.Run("ExponentialHistogramFloat64", testDatatypeIgnoreTime(exponentialHistogramFloat64A, exponentialHistogramFloat64C, equalExponentialHistograms[float64]))
 	t.Run("ExponentialHistogramDataPointInt64", testDatatypeIgnoreTime(exponentialHistogramDataPointInt64A, exponentialHistogramDataPointInt64C, equalExponentialHistogramDataPoints[int64]))
 	t.Run("ExponentialHistogramDataPointFloat64", testDatatypeIgnoreTime(exponentialHistogramDataPointFloat64A, exponentialHistogramDataPointFloat64C, equalExponentialHistogramDataPoints[float64]))
+	t.Run("Summary", testDatatypeIgnoreTime(summaryA, summaryC, equalSummary))
+	t.Run("SummaryDataPoint", testDatatypeIgnoreTime(summaryDataPointA, summaryDataPointC, equalSummaryDataPoint))
 }
 
 func TestAssertEqualIgnoreExemplars(t *testing.T) {
@@ -557,6 +758,30 @@ func TestAssertEqualIgnoreExemplars(t *testing.T) {
 	t.Run("ExponentialHistogramDataPointFloat64", testDatatypeIgnoreExemplars(exponentialHistogramDataPointFloat64A, ehdpFloat64, equalExponentialHistogramDataPoints[float64]))
 }
 
+func TestAssertEqualIgnoreValue(t *testing.T) {
+	t.Run("ResourceMetrics", testDatatypeIgnoreValue(resourceMetricsA, resourceMetricsD, equalResourceMetrics))
+	t.Run("ScopeMetrics", testDatatypeIgnoreValue(scopeMetricsA, scopeMetricsD, equalScopeMetrics))
+	t.Run("Metrics", testDatatypeIgnoreValue(metricsA, metricsD, equalMetrics))
+	t.Run("HistogramInt64", testDatatypeIgnoreValue(histogramInt64A, histogramInt64D, equalHistograms[int64]))
+	t.Run("HistogramFloat64", testDatatypeIgnoreValue(histogramFloat64A, histogramFloat64D, equalHistograms[float64]))
+	t.Run("SumInt64", testDatatypeIgnoreValue(sumInt64A, sumInt64D, equalSums[int64]))
+	t.Run("SumFloat64", testDatatypeIgnoreValue(sumFloat64A, sumFloat64D, equalSums[float64]))
+	t.Run("GaugeInt64", testDatatypeIgnoreValue(gaugeInt64A, gaugeInt64D, equalGauges[int64]))
+	t.Run("GaugeFloat64", testDatatypeIgnoreValue(gaugeFloat64A, gaugeFloat64D, equalGauges[float64]))
+	t.Run("HistogramDataPointInt64", testDatatypeIgnoreValue(histogramDataPointInt64A, histogramDataPointInt64D, equalHistogramDataPoints[int64]))
+	t.Run("HistogramDataPointFloat64", testDatatypeIgnoreValue(histogramDataPointFloat64A, histogramDataPointFloat64D, equalHistogramDataPoints[float64]))
+	t.Run("DataPointInt64", testDatatypeIgnoreValue(dataPointInt64A, dataPointInt64D, equalDataPoints[int64]))
+	t.Run("DataPointFloat64", testDatatypeIgnoreValue(dataPointFloat64A, dataPointFloat64D, equalDataPoints[float64]))
+	t.Run("ExemplarInt64", testDatatypeIgnoreValue(exemplarInt64A, exemplarInt64D, equalExemplars[int64]))
+	t.Run("ExemplarFloat64", testDatatypeIgnoreValue(exemplarFloat64A, exemplarFloat64D, equalExemplars[float64]))
+	t.Run("ExponentialHistogramInt64", testDatatypeIgnoreValue(exponentialHistogramInt64A, exponentialHistogramInt64D, equalExponentialHistograms[int64]))
+	t.Run("ExponentialHistogramFloat64", testDatatypeIgnoreValue(exponentialHistogramFloat64A, exponentialHistogramFloat64D, equalExponentialHistograms[float64]))
+	t.Run("ExponentialHistogramDataPointInt64", testDatatypeIgnoreValue(exponentialHistogramDataPointInt64A, exponentialHistogramDataPointInt64D, equalExponentialHistogramDataPoints[int64]))
+	t.Run("ExponentialHistogramDataPointFloat64", testDatatypeIgnoreValue(exponentialHistogramDataPointFloat64A, exponentialHistogramDataPointFloat64D, equalExponentialHistogramDataPoints[float64]))
+	t.Run("Summary", testDatatypeIgnoreValue(summaryA, summaryD, equalSummary))
+	t.Run("SummaryDataPoint", testDatatypeIgnoreValue(summaryDataPointA, summaryDataPointD, equalSummaryDataPoint))
+}
+
 type unknownAggregation struct {
 	metricdata.Aggregation
 }
@@ -571,6 +796,7 @@ func TestAssertAggregationsEqual(t *testing.T) {
 	AssertAggregationsEqual(t, histogramFloat64A, histogramFloat64A)
 	AssertAggregationsEqual(t, exponentialHistogramInt64A, exponentialHistogramInt64A)
 	AssertAggregationsEqual(t, exponentialHistogramFloat64A, exponentialHistogramFloat64A)
+	AssertAggregationsEqual(t, summaryA, summaryA)
 
 	r := equalAggregations(sumInt64A, nil, config{})
 	assert.Len(t, r, 1, "should return nil comparison mismatch only")
@@ -587,47 +813,80 @@ func TestAssertAggregationsEqual(t *testing.T) {
 	r = equalAggregations(sumInt64A, sumInt64C, config{ignoreTimestamp: true})
 	assert.Len(t, r, 0, "sums should be equal: %v", r)
 
+	r = equalAggregations(sumInt64A, sumInt64D, config{ignoreValue: true})
+	assert.Len(t, r, 0, "value should be ignored: %v == %v", sumInt64A, sumInt64D)
+
 	r = equalAggregations(sumFloat64A, sumFloat64B, config{})
 	assert.Greaterf(t, len(r), 0, "sums should not be equal: %v == %v", sumFloat64A, sumFloat64B)
 
 	r = equalAggregations(sumFloat64A, sumFloat64C, config{ignoreTimestamp: true})
 	assert.Len(t, r, 0, "sums should be equal: %v", r)
 
+	r = equalAggregations(sumFloat64A, sumFloat64D, config{ignoreValue: true})
+	assert.Len(t, r, 0, "value should be ignored: %v == %v", sumFloat64A, sumFloat64D)
+
 	r = equalAggregations(gaugeInt64A, gaugeInt64B, config{})
 	assert.Greaterf(t, len(r), 0, "gauges should not be equal: %v == %v", gaugeInt64A, gaugeInt64B)
 
 	r = equalAggregations(gaugeInt64A, gaugeInt64C, config{ignoreTimestamp: true})
 	assert.Len(t, r, 0, "gauges should be equal: %v", r)
 
+	r = equalAggregations(gaugeInt64A, gaugeInt64D, config{ignoreValue: true})
+	assert.Len(t, r, 0, "value should be ignored: %v == %v", gaugeInt64A, gaugeInt64D)
+
 	r = equalAggregations(gaugeFloat64A, gaugeFloat64B, config{})
 	assert.Greaterf(t, len(r), 0, "gauges should not be equal: %v == %v", gaugeFloat64A, gaugeFloat64B)
 
 	r = equalAggregations(gaugeFloat64A, gaugeFloat64C, config{ignoreTimestamp: true})
 	assert.Len(t, r, 0, "gauges should be equal: %v", r)
 
+	r = equalAggregations(gaugeFloat64A, gaugeFloat64D, config{ignoreValue: true})
+	assert.Len(t, r, 0, "value should be ignored: %v == %v", gaugeFloat64A, gaugeFloat64D)
+
 	r = equalAggregations(histogramInt64A, histogramInt64B, config{})
 	assert.Greaterf(t, len(r), 0, "histograms should not be equal: %v == %v", histogramInt64A, histogramInt64B)
 
 	r = equalAggregations(histogramInt64A, histogramInt64C, config{ignoreTimestamp: true})
 	assert.Len(t, r, 0, "histograms should be equal: %v", r)
 
+	r = equalAggregations(histogramInt64A, histogramInt64D, config{ignoreValue: true})
+	assert.Len(t, r, 0, "value should be ignored: %v == %v", histogramInt64A, histogramInt64D)
+
 	r = equalAggregations(histogramFloat64A, histogramFloat64B, config{})
 	assert.Greaterf(t, len(r), 0, "histograms should not be equal: %v == %v", histogramFloat64A, histogramFloat64B)
 
 	r = equalAggregations(histogramFloat64A, histogramFloat64C, config{ignoreTimestamp: true})
 	assert.Len(t, r, 0, "histograms should be equal: %v", r)
 
+	r = equalAggregations(histogramFloat64A, histogramFloat64D, config{ignoreValue: true})
+	assert.Len(t, r, 0, "value should be ignored: %v == %v", histogramFloat64A, histogramFloat64D)
+
 	r = equalAggregations(exponentialHistogramInt64A, exponentialHistogramInt64B, config{})
 	assert.Greaterf(t, len(r), 0, "exponential histograms should not be equal: %v == %v", exponentialHistogramInt64A, exponentialHistogramInt64B)
 
 	r = equalAggregations(exponentialHistogramInt64A, exponentialHistogramInt64C, config{ignoreTimestamp: true})
 	assert.Len(t, r, 0, "exponential histograms should be equal: %v", r)
 
+	r = equalAggregations(exponentialHistogramInt64A, exponentialHistogramInt64D, config{ignoreValue: true})
+	assert.Len(t, r, 0, "value should be ignored: %v == %v", exponentialHistogramInt64A, exponentialHistogramInt64D)
+
 	r = equalAggregations(exponentialHistogramFloat64A, exponentialHistogramFloat64B, config{})
 	assert.Greaterf(t, len(r), 0, "exponential histograms should not be equal: %v == %v", exponentialHistogramFloat64A, exponentialHistogramFloat64B)
 
 	r = equalAggregations(exponentialHistogramFloat64A, exponentialHistogramFloat64C, config{ignoreTimestamp: true})
 	assert.Len(t, r, 0, "exponential histograms should be equal: %v", r)
+
+	r = equalAggregations(exponentialHistogramFloat64A, exponentialHistogramFloat64D, config{ignoreValue: true})
+	assert.Len(t, r, 0, "value should be ignored: %v == %v", exponentialHistogramFloat64A, exponentialHistogramFloat64D)
+
+	r = equalAggregations(summaryA, summaryB, config{})
+	assert.Greaterf(t, len(r), 0, "summaries should not be equal: %v == %v", summaryA, summaryB)
+
+	r = equalAggregations(summaryA, summaryC, config{ignoreTimestamp: true})
+	assert.Len(t, r, 0, "summaries should be equal: %v", r)
+
+	r = equalAggregations(summaryA, summaryD, config{ignoreValue: true})
+	assert.Len(t, r, 0, "value should be ignored: %v == %v", summaryA, summaryD)
 }
 
 func TestAssertAttributes(t *testing.T) {
@@ -652,6 +911,9 @@ func TestAssertAttributes(t *testing.T) {
 	AssertHasAttributes(t, exponentialHistogramInt64A, attribute.Bool("A", true))
 	AssertHasAttributes(t, exponentialHistogramFloat64A, attribute.Bool("A", true))
 	AssertHasAttributes(t, exponentialBucket2, attribute.Bool("A", true)) // No-op, always pass.
+	AssertHasAttributes(t, summaryDataPointA, attribute.Bool("A", true))
+	AssertHasAttributes(t, summaryA, attribute.Bool("A", true))
+	AssertHasAttributes(t, quantileValueA, attribute.Bool("A", true)) // No-op, always pass.
 
 	r := hasAttributesAggregation(gaugeInt64A, attribute.Bool("A", true))
 	assert.Equal(t, len(r), 0, "gaugeInt64A has A=True")
@@ -669,6 +931,8 @@ func TestAssertAttributes(t *testing.T) {
 	assert.Equal(t, len(r), 0, "exponentialHistogramInt64A has A=True")
 	r = hasAttributesAggregation(exponentialHistogramFloat64A, attribute.Bool("A", true))
 	assert.Equal(t, len(r), 0, "exponentialHistogramFloat64A has A=True")
+	r = hasAttributesAggregation(summaryA, attribute.Bool("A", true))
+	assert.Equal(t, len(r), 0, "summaryA has A=True")
 
 	r = hasAttributesAggregation(gaugeInt64A, attribute.Bool("A", false))
 	assert.Greater(t, len(r), 0, "gaugeInt64A does not have A=False")
@@ -686,6 +950,8 @@ func TestAssertAttributes(t *testing.T) {
 	assert.Greater(t, len(r), 0, "exponentialHistogramInt64A does not have A=False")
 	r = hasAttributesAggregation(exponentialHistogramFloat64A, attribute.Bool("A", false))
 	assert.Greater(t, len(r), 0, "exponentialHistogramFloat64A does not have A=False")
+	r = hasAttributesAggregation(summaryA, attribute.Bool("A", false))
+	assert.Greater(t, len(r), 0, "summaryA does not have A=False")
 
 	r = hasAttributesAggregation(gaugeInt64A, attribute.Bool("B", true))
 	assert.Greater(t, len(r), 0, "gaugeInt64A does not have Attribute B")
@@ -703,6 +969,8 @@ func TestAssertAttributes(t *testing.T) {
 	assert.Greater(t, len(r), 0, "exponentialHistogramIntA does not have Attribute B")
 	r = hasAttributesAggregation(exponentialHistogramFloat64A, attribute.Bool("B", true))
 	assert.Greater(t, len(r), 0, "exponentialHistogramFloatA does not have Attribute B")
+	r = hasAttributesAggregation(summaryA, attribute.Bool("B", true))
+	assert.Greater(t, len(r), 0, "summaryA does not have Attribute B")
 }
 
 func TestAssertAttributesFail(t *testing.T) {
@@ -727,6 +995,10 @@ func TestAssertAttributesFail(t *testing.T) {
 	assert.False(t, AssertHasAttributes(fakeT, exponentialHistogramDataPointFloat64A, attribute.Bool("B", true)))
 	assert.False(t, AssertHasAttributes(fakeT, exponentialHistogramInt64A, attribute.Bool("A", false)))
 	assert.False(t, AssertHasAttributes(fakeT, exponentialHistogramFloat64A, attribute.Bool("B", true)))
+	assert.False(t, AssertHasAttributes(fakeT, summaryDataPointA, attribute.Bool("A", false)))
+	assert.False(t, AssertHasAttributes(fakeT, summaryDataPointA, attribute.Bool("B", true)))
+	assert.False(t, AssertHasAttributes(fakeT, summaryA, attribute.Bool("A", false)))
+	assert.False(t, AssertHasAttributes(fakeT, summaryA, attribute.Bool("B", true)))
 
 	sum := metricdata.Sum[int64]{
 		Temporality: metricdata.CumulativeTemporality,
diff --git a/sdk/metric/metricdata/metricdatatest/comparisons.go b/sdk/metric/metricdata/metricdatatest/comparisons.go
index 4bb3b19fb0b..a25c930676f 100644
--- a/sdk/metric/metricdata/metricdatatest/comparisons.go
+++ b/sdk/metric/metricdata/metricdatatest/comparisons.go
@@ -155,6 +155,12 @@ func equalAggregations(a, b metricdata.Aggregation, cfg config) (reasons []strin
 			reasons = append(reasons, "ExponentialHistogram not equal:")
 			reasons = append(reasons, r...)
 		}
+	case metricdata.Summary:
+		r := equalSummary(v, b.(metricdata.Summary), cfg)
+		if len(r) > 0 {
+			reasons = append(reasons, "Summary not equal:")
+			reasons = append(reasons, r...)
+		}
 	default:
 		reasons = append(reasons, fmt.Sprintf("Aggregation of unknown types %T", a))
 	}
@@ -252,8 +258,10 @@ func equalDataPoints[N int64 | float64](a, b metricdata.DataPoint[N], cfg config
 		}
 	}
 
-	if a.Value != b.Value {
-		reasons = append(reasons, notEqualStr("Value", a.Value, b.Value))
+	if !cfg.ignoreValue {
+		if a.Value != b.Value {
+			reasons = append(reasons, notEqualStr("Value", a.Value, b.Value))
+		}
 	}
 
 	if !cfg.ignoreExemplars {
@@ -290,23 +298,25 @@ func equalHistogramDataPoints[N int64 | float64](a, b metricdata.HistogramDataPo
 			reasons = append(reasons, notEqualStr("Time", a.Time.UnixNano(), b.Time.UnixNano()))
 		}
 	}
-	if a.Count != b.Count {
-		reasons = append(reasons, notEqualStr("Count", a.Count, b.Count))
-	}
-	if !equalSlices(a.Bounds, b.Bounds) {
-		reasons = append(reasons, notEqualStr("Bounds", a.Bounds, b.Bounds))
-	}
-	if !equalSlices(a.BucketCounts, b.BucketCounts) {
-		reasons = append(reasons, notEqualStr("BucketCounts", a.BucketCounts, b.BucketCounts))
-	}
-	if !eqExtrema(a.Min, b.Min) {
-		reasons = append(reasons, notEqualStr("Min", a.Min, b.Min))
-	}
-	if !eqExtrema(a.Max, b.Max) {
-		reasons = append(reasons, notEqualStr("Max", a.Max, b.Max))
-	}
-	if a.Sum != b.Sum {
-		reasons = append(reasons, notEqualStr("Sum", a.Sum, b.Sum))
+	if !cfg.ignoreValue {
+		if a.Count != b.Count {
+			reasons = append(reasons, notEqualStr("Count", a.Count, b.Count))
+		}
+		if !equalSlices(a.Bounds, b.Bounds) {
+			reasons = append(reasons, notEqualStr("Bounds", a.Bounds, b.Bounds))
+		}
+		if !equalSlices(a.BucketCounts, b.BucketCounts) {
+			reasons = append(reasons, notEqualStr("BucketCounts", a.BucketCounts, b.BucketCounts))
+		}
+		if !eqExtrema(a.Min, b.Min) {
+			reasons = append(reasons, notEqualStr("Min", a.Min, b.Min))
+		}
+		if !eqExtrema(a.Max, b.Max) {
+			reasons = append(reasons, notEqualStr("Max", a.Max, b.Max))
+		}
+		if a.Sum != b.Sum {
+			reasons = append(reasons, notEqualStr("Sum", a.Sum, b.Sum))
+		}
 	}
 	if !cfg.ignoreExemplars {
 		r := compareDiff(diffSlices(
@@ -366,35 +376,36 @@ func equalExponentialHistogramDataPoints[N int64 | float64](a, b metricdata.Expo
 			reasons = append(reasons, notEqualStr("Time", a.Time.UnixNano(), b.Time.UnixNano()))
 		}
 	}
-	if a.Count != b.Count {
-		reasons = append(reasons, notEqualStr("Count", a.Count, b.Count))
-	}
-	if !eqExtrema(a.Min, b.Min) {
-		reasons = append(reasons, notEqualStr("Min", a.Min, b.Min))
-	}
-	if !eqExtrema(a.Max, b.Max) {
-		reasons = append(reasons, notEqualStr("Max", a.Max, b.Max))
-	}
-	if a.Sum != b.Sum {
-		reasons = append(reasons, notEqualStr("Sum", a.Sum, b.Sum))
-	}
+	if !cfg.ignoreValue {
+		if a.Count != b.Count {
+			reasons = append(reasons, notEqualStr("Count", a.Count, b.Count))
+		}
+		if !eqExtrema(a.Min, b.Min) {
+			reasons = append(reasons, notEqualStr("Min", a.Min, b.Min))
+		}
+		if !eqExtrema(a.Max, b.Max) {
+			reasons = append(reasons, notEqualStr("Max", a.Max, b.Max))
+		}
+		if a.Sum != b.Sum {
+			reasons = append(reasons, notEqualStr("Sum", a.Sum, b.Sum))
+		}
 
-	if a.Scale != b.Scale {
-		reasons = append(reasons, notEqualStr("Scale", a.Scale, b.Scale))
-	}
-	if a.ZeroCount != b.ZeroCount {
-		reasons = append(reasons, notEqualStr("ZeroCount", a.ZeroCount, b.ZeroCount))
-	}
+		if a.Scale != b.Scale {
+			reasons = append(reasons, notEqualStr("Scale", a.Scale, b.Scale))
+		}
+		if a.ZeroCount != b.ZeroCount {
+			reasons = append(reasons, notEqualStr("ZeroCount", a.ZeroCount, b.ZeroCount))
+		}
 
-	r := equalExponentialBuckets(a.PositiveBucket, b.PositiveBucket, cfg)
-	if len(r) > 0 {
-		reasons = append(reasons, r...)
-	}
-	r = equalExponentialBuckets(a.NegativeBucket, b.NegativeBucket, cfg)
-	if len(r) > 0 {
-		reasons = append(reasons, r...)
+		r := equalExponentialBuckets(a.PositiveBucket, b.PositiveBucket, cfg)
+		if len(r) > 0 {
+			reasons = append(reasons, r...)
+		}
+		r = equalExponentialBuckets(a.NegativeBucket, b.NegativeBucket, cfg)
+		if len(r) > 0 {
+			reasons = append(reasons, r...)
+		}
 	}
-
 	if !cfg.ignoreExemplars {
 		r := compareDiff(diffSlices(
 			a.Exemplars,
@@ -421,6 +432,69 @@ func equalExponentialBuckets(a, b metricdata.ExponentialBucket, _ config) (reaso
 	return reasons
 }
 
+func equalSummary(a, b metricdata.Summary, cfg config) (reasons []string) {
+	r := compareDiff(diffSlices(
+		a.DataPoints,
+		b.DataPoints,
+		func(a, b metricdata.SummaryDataPoint) bool {
+			r := equalSummaryDataPoint(a, b, cfg)
+			return len(r) == 0
+		},
+	))
+	if r != "" {
+		reasons = append(reasons, fmt.Sprintf("Summary DataPoints not equal:\n%s", r))
+	}
+	return reasons
+}
+
+func equalSummaryDataPoint(a, b metricdata.SummaryDataPoint, cfg config) (reasons []string) {
+	if !a.Attributes.Equals(&b.Attributes) {
+		reasons = append(reasons, notEqualStr(
+			"Attributes",
+			a.Attributes.Encoded(attribute.DefaultEncoder()),
+			b.Attributes.Encoded(attribute.DefaultEncoder()),
+		))
+	}
+	if !cfg.ignoreTimestamp {
+		if !a.StartTime.Equal(b.StartTime) {
+			reasons = append(reasons, notEqualStr("StartTime", a.StartTime.UnixNano(), b.StartTime.UnixNano()))
+		}
+		if !a.Time.Equal(b.Time) {
+			reasons = append(reasons, notEqualStr("Time", a.Time.UnixNano(), b.Time.UnixNano()))
+		}
+	}
+	if !cfg.ignoreValue {
+		if a.Count != b.Count {
+			reasons = append(reasons, notEqualStr("Count", a.Count, b.Count))
+		}
+		if a.Sum != b.Sum {
+			reasons = append(reasons, notEqualStr("Sum", a.Sum, b.Sum))
+		}
+		r := compareDiff(diffSlices(
+			a.QuantileValues,
+			b.QuantileValues,
+			func(a, b metricdata.QuantileValue) bool {
+				r := equalQuantileValue(a, b, cfg)
+				return len(r) == 0
+			},
+		))
+		if r != "" {
+			reasons = append(reasons, r)
+		}
+	}
+	return reasons
+}
+
+func equalQuantileValue(a, b metricdata.QuantileValue, _ config) (reasons []string) {
+	if a.Quantile != b.Quantile {
+		reasons = append(reasons, notEqualStr("Quantile", a.Quantile, b.Quantile))
+	}
+	if a.Value != b.Value {
+		reasons = append(reasons, notEqualStr("Value", a.Value, b.Value))
+	}
+	return reasons
+}
+
 func notEqualStr(prefix string, expected, actual interface{}) string {
 	return fmt.Sprintf("%s not equal:\nexpected: %v\nactual: %v", prefix, expected, actual)
 }
@@ -518,8 +592,10 @@ func equalExemplars[N int64 | float64](a, b metricdata.Exemplar[N], cfg config)
 			reasons = append(reasons, notEqualStr("Time", a.Time.UnixNano(), b.Time.UnixNano()))
 		}
 	}
-	if a.Value != b.Value {
-		reasons = append(reasons, notEqualStr("Value", a.Value, b.Value))
+	if !cfg.ignoreValue {
+		if a.Value != b.Value {
+			reasons = append(reasons, notEqualStr("Value", a.Value, b.Value))
+		}
 	}
 	if !equalSlices(a.SpanID, b.SpanID) {
 		reasons = append(reasons, notEqualStr("SpanID", a.SpanID, b.SpanID))
@@ -709,6 +785,8 @@ func hasAttributesAggregation(agg metricdata.Aggregation, attrs ...attribute.Key
 		reasons = hasAttributesExponentialHistogram(agg, attrs...)
 	case metricdata.ExponentialHistogram[float64]:
 		reasons = hasAttributesExponentialHistogram(agg, attrs...)
+	case metricdata.Summary:
+		reasons = hasAttributesSummary(agg, attrs...)
 	default:
 		reasons = []string{fmt.Sprintf("unknown aggregation %T", agg)}
 	}
@@ -734,6 +812,7 @@ func hasAttributesScopeMetrics(sm metricdata.ScopeMetrics, attrs ...attribute.Ke
 	}
 	return reasons
 }
+
 func hasAttributesResourceMetrics(rm metricdata.ResourceMetrics, attrs ...attribute.KeyValue) (reasons []string) {
 	for n, sm := range rm.ScopeMetrics {
 		reas := hasAttributesScopeMetrics(sm, attrs...)
@@ -744,3 +823,28 @@ func hasAttributesResourceMetrics(rm metricdata.ResourceMetrics, attrs ...attrib
 	}
 	return reasons
 }
+
+func hasAttributesSummary(summary metricdata.Summary, attrs ...attribute.KeyValue) (reasons []string) {
+	for n, dp := range summary.DataPoints {
+		reas := hasAttributesSummaryDataPoint(dp, attrs...)
+		if len(reas) > 0 {
+			reasons = append(reasons, fmt.Sprintf("summary datapoint %d attributes:\n", n))
+			reasons = append(reasons, reas...)
+		}
+	}
+	return reasons
+}
+
+func hasAttributesSummaryDataPoint(dp metricdata.SummaryDataPoint, attrs ...attribute.KeyValue) (reasons []string) {
+	for _, attr := range attrs {
+		val, ok := dp.Attributes.Value(attr.Key)
+		if !ok {
+			reasons = append(reasons, missingAttrStr(string(attr.Key)))
+			continue
+		}
+		if val != attr.Value {
+			reasons = append(reasons, notEqualStr(string(attr.Key), attr.Value.Emit(), val.Emit()))
+		}
+	}
+	return reasons
+}
diff --git a/sdk/metric/periodic_reader.go b/sdk/metric/periodic_reader.go
index 2a85456102a..ff86999c759 100644
--- a/sdk/metric/periodic_reader.go
+++ b/sdk/metric/periodic_reader.go
@@ -127,7 +127,8 @@ func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) *Peri
 		rmPool: sync.Pool{
 			New: func() interface{} {
 				return &metricdata.ResourceMetrics{}
-			}},
+			},
+		},
 	}
 	r.externalProducers.Store(conf.producers)
 
diff --git a/sdk/metric/periodic_reader_test.go b/sdk/metric/periodic_reader_test.go
index 2f055796dd1..a5ac94fe367 100644
--- a/sdk/metric/periodic_reader_test.go
+++ b/sdk/metric/periodic_reader_test.go
@@ -202,8 +202,6 @@ type periodicReaderTestSuite struct {
 }
 
 func (ts *periodicReaderTestSuite) SetupTest() {
-	ts.Reader = ts.Factory()
-
 	e := &fnExporter{
 		exportFunc:   func(context.Context, *metricdata.ResourceMetrics) error { return assert.AnError },
 		flushFunc:    func(context.Context) error { return assert.AnError },
@@ -344,7 +342,8 @@ func TestPeriodicReaderFlushesPending(t *testing.T) {
 					return ctx.Err()
 				}
 				return nil
-			}})
+			},
+		})
 		assert.ErrorIs(t, r.ForceFlush(context.Background()), context.DeadlineExceeded)
 		assert.False(t, *called, "exporter Export method called when it should have failed before export")
 
@@ -396,7 +395,8 @@ func TestPeriodicReaderFlushesPending(t *testing.T) {
 					return ctx.Err()
 				}
 				return nil
-			}})
+			},
+		})
 		assert.ErrorIs(t, r.Shutdown(context.Background()), context.DeadlineExceeded)
 		assert.False(t, *called, "exporter Export method called when it should have failed before export")
 	})
@@ -427,12 +427,13 @@ func TestPeriodicReaderMultipleForceFlush(t *testing.T) {
 	r.register(testSDKProducer{})
 	require.NoError(t, r.ForceFlush(ctx))
 	require.NoError(t, r.ForceFlush(ctx))
+	require.NoError(t, r.Shutdown(ctx))
 }
 
 func BenchmarkPeriodicReader(b *testing.B) {
-	b.Run("Collect", benchReaderCollectFunc(
-		NewPeriodicReader(new(fnExporter)),
-	))
+	r := NewPeriodicReader(new(fnExporter))
+	b.Run("Collect", benchReaderCollectFunc(r))
+	require.NoError(b, r.Shutdown(context.Background()))
 }
 
 func TestPeriodiclReaderTemporality(t *testing.T) {
diff --git a/sdk/metric/pipeline.go b/sdk/metric/pipeline.go
index 132da653b75..4beb28512a9 100644
--- a/sdk/metric/pipeline.go
+++ b/sdk/metric/pipeline.go
@@ -231,7 +231,7 @@ func newInserter[N int64 | float64](p *pipeline, vc *cache[string, instID]) *ins
 //
 // If an instrument is determined to use a Drop aggregation, that instrument is
 // not inserted nor returned.
-func (i *inserter[N]) Instrument(inst Instrument) ([]aggregate.Measure[N], error) {
+func (i *inserter[N]) Instrument(inst Instrument, readerAggregation Aggregation) ([]aggregate.Measure[N], error) {
 	var (
 		matched  bool
 		measures []aggregate.Measure[N]
@@ -245,8 +245,7 @@ func (i *inserter[N]) Instrument(inst Instrument) ([]aggregate.Measure[N], error
 			continue
 		}
 		matched = true
-
-		in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream)
+		in, id, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
 		if err != nil {
 			errs.append(err)
 		}
@@ -271,7 +270,7 @@ func (i *inserter[N]) Instrument(inst Instrument) ([]aggregate.Measure[N], error
 		Description: inst.Description,
 		Unit:        inst.Unit,
 	}
-	in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream)
+	in, _, err := i.cachedAggregator(inst.Scope, inst.Kind, stream, readerAggregation)
 	if err != nil {
 		errs.append(err)
 	}
@@ -291,6 +290,31 @@ type aggVal[N int64 | float64] struct {
 	Err     error
 }
 
+// readerDefaultAggregation returns the default aggregation for the instrument
+// kind based on the reader's aggregation preferences. This is used unless the
+// aggregation is overridden with a view.
+func (i *inserter[N]) readerDefaultAggregation(kind InstrumentKind) Aggregation {
+	aggregation := i.pipeline.reader.aggregation(kind)
+	switch aggregation.(type) {
+	case nil, AggregationDefault:
+		// If the reader returns default or nil use the default selector.
+		aggregation = DefaultAggregationSelector(kind)
+	default:
+		// Deep copy and validate before using.
+		aggregation = aggregation.copy()
+		if err := aggregation.err(); err != nil {
+			orig := aggregation
+			aggregation = DefaultAggregationSelector(kind)
+			global.Error(
+				err, "using default aggregation instead",
+				"aggregation", orig,
+				"replacement", aggregation,
+			)
+		}
+	}
+	return aggregation
+}
+
 // cachedAggregator returns the appropriate aggregate input and output
 // functions for an instrument configuration. If the exact instrument has been
 // created within the inst.Scope, those aggregate function instances will be
@@ -305,29 +329,14 @@ type aggVal[N int64 | float64] struct {
 //
 // If the instrument defines an unknown or incompatible aggregation, an error
 // is returned.
-func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind InstrumentKind, stream Stream) (meas aggregate.Measure[N], aggID uint64, err error) {
+func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind InstrumentKind, stream Stream, readerAggregation Aggregation) (meas aggregate.Measure[N], aggID uint64, err error) {
 	switch stream.Aggregation.(type) {
 	case nil:
-		// Undefined, nil, means to use the default from the reader.
-		stream.Aggregation = i.pipeline.reader.aggregation(kind)
-		switch stream.Aggregation.(type) {
-		case nil, AggregationDefault:
-			// If the reader returns default or nil use the default selector.
-			stream.Aggregation = DefaultAggregationSelector(kind)
-		default:
-			// Deep copy and validate before using.
-			stream.Aggregation = stream.Aggregation.copy()
-			if err := stream.Aggregation.err(); err != nil {
-				orig := stream.Aggregation
-				stream.Aggregation = DefaultAggregationSelector(kind)
-				global.Error(
-					err, "using default aggregation instead",
-					"aggregation", orig,
-					"replacement", stream.Aggregation,
-				)
-			}
-		}
+		// The aggregation was not overridden with a view. Use the aggregation
+		// provided by the reader.
+		stream.Aggregation = readerAggregation
 	case AggregationDefault:
+		// The view explicitly requested the default aggregation.
 		stream.Aggregation = DefaultAggregationSelector(kind)
 	}
 
@@ -352,9 +361,7 @@ func (i *inserter[N]) cachedAggregator(scope instrumentation.Scope, kind Instrum
 			Temporality:      i.pipeline.reader.temporality(kind),
 			AggregationLimit: cardinalityLimit(),
 		}
-		if len(stream.AllowAttributeKeys) > 0 {
-			b.Filter = stream.attributeFilter()
-		}
+		b.Filter = stream.AttributeFilter
 		in, out, err := i.aggregateFunc(b, stream.Aggregation, kind)
 		if err != nil {
 			return aggVal[N]{0, nil, err}
@@ -599,7 +606,29 @@ func (r resolver[N]) Aggregators(id Instrument) ([]aggregate.Measure[N], error)
 
 	errs := &multierror{}
 	for _, i := range r.inserters {
-		in, err := i.Instrument(id)
+		in, err := i.Instrument(id, i.readerDefaultAggregation(id.Kind))
+		if err != nil {
+			errs.append(err)
+		}
+		measures = append(measures, in...)
+	}
+	return measures, errs.errorOrNil()
+}
+
+// HistogramAggregators returns the histogram Aggregators that must be updated by the instrument
+// defined by key. If boundaries were provided on instrument instantiation, those take precedence
+// over boundaries provided by the reader.
+func (r resolver[N]) HistogramAggregators(id Instrument, boundaries []float64) ([]aggregate.Measure[N], error) {
+	var measures []aggregate.Measure[N]
+
+	errs := &multierror{}
+	for _, i := range r.inserters {
+		agg := i.readerDefaultAggregation(id.Kind)
+		if histAgg, ok := agg.(AggregationExplicitBucketHistogram); ok && len(boundaries) > 0 {
+			histAgg.Boundaries = boundaries
+			agg = histAgg
+		}
+		in, err := i.Instrument(id, agg)
 		if err != nil {
 			errs.append(err)
 		}
diff --git a/sdk/metric/pipeline_registry_test.go b/sdk/metric/pipeline_registry_test.go
index 89293e679e4..fe01d6971b3 100644
--- a/sdk/metric/pipeline_registry_test.go
+++ b/sdk/metric/pipeline_registry_test.go
@@ -39,6 +39,7 @@ type invalidAggregation struct{}
 func (invalidAggregation) copy() Aggregation {
 	return invalidAggregation{}
 }
+
 func (invalidAggregation) err() error {
 	return nil
 }
@@ -155,7 +156,7 @@ func testCreateAggregators[N int64 | float64](t *testing.T) {
 	)
 
 	instruments := []Instrument{
-		{Name: "foo", Kind: InstrumentKind(0)}, //Unknown kind
+		{Name: "foo", Kind: InstrumentKind(0)}, // Unknown kind
 		{Name: "foo", Kind: InstrumentKindCounter},
 		{Name: "foo", Kind: InstrumentKindUpDownCounter},
 		{Name: "foo", Kind: InstrumentKindHistogram},
@@ -350,7 +351,8 @@ func testCreateAggregators[N int64 | float64](t *testing.T) {
 			var c cache[string, instID]
 			p := newPipeline(nil, tt.reader, tt.views)
 			i := newInserter[N](p, &c)
-			input, err := i.Instrument(tt.inst)
+			readerAggregation := i.readerDefaultAggregation(tt.inst.Kind)
+			input, err := i.Instrument(tt.inst, readerAggregation)
 			var comps []aggregate.ComputeAggregation
 			for _, instSyncs := range p.aggregations {
 				for _, i := range instSyncs {
@@ -374,7 +376,8 @@ func testInvalidInstrumentShouldPanic[N int64 | float64]() {
 		Name: "foo",
 		Kind: InstrumentKind(255),
 	}
-	_, _ = i.Instrument(inst)
+	readerAggregation := i.readerDefaultAggregation(inst.Kind)
+	_, _ = i.Instrument(inst, readerAggregation)
 }
 
 func TestInvalidInstrumentShouldPanic(t *testing.T) {
@@ -459,6 +462,8 @@ func TestPipelineRegistryCreateAggregators(t *testing.T) {
 			p := newPipelines(resource.Empty(), tt.readers, tt.views)
 			testPipelineRegistryResolveIntAggregators(t, p, tt.wantCount)
 			testPipelineRegistryResolveFloatAggregators(t, p, tt.wantCount)
+			testPipelineRegistryResolveIntHistogramAggregators(t, p, tt.wantCount)
+			testPipelineRegistryResolveFloatHistogramAggregators(t, p, tt.wantCount)
 		})
 	}
 }
@@ -483,6 +488,26 @@ func testPipelineRegistryResolveFloatAggregators(t *testing.T, p pipelines, want
 	require.Len(t, aggs, wantCount)
 }
 
+func testPipelineRegistryResolveIntHistogramAggregators(t *testing.T, p pipelines, wantCount int) {
+	inst := Instrument{Name: "foo", Kind: InstrumentKindCounter}
+	var c cache[string, instID]
+	r := newResolver[int64](p, &c)
+	aggs, err := r.HistogramAggregators(inst, []float64{1, 2, 3})
+	assert.NoError(t, err)
+
+	require.Len(t, aggs, wantCount)
+}
+
+func testPipelineRegistryResolveFloatHistogramAggregators(t *testing.T, p pipelines, wantCount int) {
+	inst := Instrument{Name: "foo", Kind: InstrumentKindCounter}
+	var c cache[string, instID]
+	r := newResolver[float64](p, &c)
+	aggs, err := r.HistogramAggregators(inst, []float64{1, 2, 3})
+	assert.NoError(t, err)
+
+	require.Len(t, aggs, wantCount)
+}
+
 func TestPipelineRegistryResource(t *testing.T) {
 	v := NewView(Instrument{Name: "bar"}, Stream{Name: "foo"})
 	readers := []Reader{NewManualReader()}
@@ -512,6 +537,14 @@ func TestPipelineRegistryCreateAggregatorsIncompatibleInstrument(t *testing.T) {
 	floatAggs, err := rf.Aggregators(inst)
 	assert.Error(t, err)
 	assert.Len(t, floatAggs, 0)
+
+	intAggs, err = ri.HistogramAggregators(inst, []float64{1, 2, 3})
+	assert.Error(t, err)
+	assert.Len(t, intAggs, 0)
+
+	floatAggs, err = rf.HistogramAggregators(inst, []float64{1, 2, 3})
+	assert.Error(t, err)
+	assert.Len(t, floatAggs, 0)
 }
 
 type logCounter struct {
diff --git a/sdk/metric/pipeline_test.go b/sdk/metric/pipeline_test.go
index 1026fd268ff..f585c7a4743 100644
--- a/sdk/metric/pipeline_test.go
+++ b/sdk/metric/pipeline_test.go
@@ -146,7 +146,8 @@ func testDefaultViewImplicit[N int64 | float64]() func(t *testing.T) {
 			t.Run(test.name, func(t *testing.T) {
 				var c cache[string, instID]
 				i := newInserter[N](test.pipe, &c)
-				got, err := i.Instrument(inst)
+				readerAggregation := i.readerDefaultAggregation(inst.Kind)
+				got, err := i.Instrument(inst, readerAggregation)
 				require.NoError(t, err)
 				assert.Len(t, got, 1, "default view not applied")
 				for _, in := range got {
@@ -372,7 +373,8 @@ func TestInserterCachedAggregatorNameConflict(t *testing.T) {
 	pipe := newPipeline(nil, NewManualReader(), nil)
 	i := newInserter[int64](pipe, &vc)
 
-	_, origID, err := i.cachedAggregator(scope, kind, stream)
+	readerAggregation := i.readerDefaultAggregation(kind)
+	_, origID, err := i.cachedAggregator(scope, kind, stream, readerAggregation)
 	require.NoError(t, err)
 
 	require.Len(t, pipe.aggregations, 1)
@@ -382,7 +384,7 @@ func TestInserterCachedAggregatorNameConflict(t *testing.T) {
 	require.Equal(t, name, iSync[0].name)
 
 	stream.Name = "RequestCount"
-	_, id, err := i.cachedAggregator(scope, kind, stream)
+	_, id, err := i.cachedAggregator(scope, kind, stream, readerAggregation)
 	require.NoError(t, err)
 	assert.Equal(t, origID, id, "multiple aggregators for equivalent name")
 
diff --git a/sdk/metric/reader.go b/sdk/metric/reader.go
index 6ad9680413b..65cedaf3c07 100644
--- a/sdk/metric/reader.go
+++ b/sdk/metric/reader.go
@@ -50,6 +50,8 @@ var errNonPositiveDuration = fmt.Errorf("non-positive duration")
 //
 // Pull-based exporters will typically implement Register
 // themselves, since they read on demand.
+//
+// Warning: methods may be added to this interface in minor releases.
 type Reader interface {
 	// register registers a Reader with a MeterProvider.
 	// The producer argument allows the Reader to signal the sdk to collect
@@ -72,9 +74,11 @@ type Reader interface {
 	// the SDK and stores it in out. An error is returned if this is called
 	// after Shutdown or if out is nil.
 	//
-	// This method needs to be concurrent safe, and the cancelation of the
+	// This method needs to be concurrent safe, and the cancellation of the
 	// passed context is expected to be honored.
 	Collect(ctx context.Context, rm *metricdata.ResourceMetrics) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
 
 	// Shutdown flushes all metric measurements held in an export pipeline and releases any
 	// held computational resources.
@@ -89,6 +93,8 @@ type Reader interface {
 	//
 	// This method needs to be concurrent safe.
 	Shutdown(context.Context) error
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
 }
 
 // sdkProducer produces metrics for a Reader.
@@ -101,10 +107,15 @@ type sdkProducer interface {
 
 // Producer produces metrics for a Reader from an external source.
 type Producer interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
 	// Produce returns aggregated metrics from an external source.
 	//
 	// This method should be safe to call concurrently.
 	Produce(context.Context) ([]metricdata.ScopeMetrics, error)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
 }
 
 // produceHolder is used as an atomic.Value to wrap the non-concrete producer
diff --git a/sdk/metric/reader_test.go b/sdk/metric/reader_test.go
index a1e9e507b9d..48d36155ec6 100644
--- a/sdk/metric/reader_test.go
+++ b/sdk/metric/reader_test.go
@@ -138,7 +138,8 @@ func (ts *readerTestSuite) TestSDKFailureBlocksExternalProducer() {
 		produceFunc: func(ctx context.Context, rm *metricdata.ResourceMetrics) error {
 			*rm = metricdata.ResourceMetrics{}
 			return assert.AnError
-		}})
+		},
+	})
 
 	m := metricdata.ResourceMetrics{}
 	err := ts.Reader.Collect(context.Background(), &m)
diff --git a/sdk/metric/version.go b/sdk/metric/version.go
index cd4666b3223..edcf7cfc862 100644
--- a/sdk/metric/version.go
+++ b/sdk/metric/version.go
@@ -16,5 +16,5 @@ package metric // import "go.opentelemetry.io/otel/sdk/metric"
 
 // version is the current release version of the metric SDK in use.
 func version() string {
-	return "0.39.0"
+	return "1.21.0"
 }
diff --git a/sdk/metric/view.go b/sdk/metric/view.go
index 2d0fe18d7e9..65f243befed 100644
--- a/sdk/metric/view.go
+++ b/sdk/metric/view.go
@@ -42,10 +42,10 @@ type View func(Instrument) (Stream, bool)
 // view that matches no instruments is returned. If you need to match a
 // zero-value field, create a View directly.
 //
-// The Name field of criteria supports wildcard pattern matching. The wildcard
-// "*" is recognized as matching zero or more characters, and "?" is recognized
-// as matching exactly one character. For example, a pattern of "*" will match
-// all instrument names.
+// The Name field of criteria supports wildcard pattern matching. The "*"
+// wildcard is recognized as matching zero or more characters, and "?" is
+// recognized as matching exactly one character. For example, a pattern of "*"
+// matches all instrument names.
 //
 // The Stream mask only applies updates for non-zero-value fields. By default,
 // the Instrument the View matches against will be use for the Name,
@@ -107,11 +107,11 @@ func NewView(criteria Instrument, mask Stream) View {
 	return func(i Instrument) (Stream, bool) {
 		if matchFunc(i) {
 			return Stream{
-				Name:               nonZero(mask.Name, i.Name),
-				Description:        nonZero(mask.Description, i.Description),
-				Unit:               nonZero(mask.Unit, i.Unit),
-				Aggregation:        agg,
-				AllowAttributeKeys: mask.AllowAttributeKeys,
+				Name:            nonZero(mask.Name, i.Name),
+				Description:     nonZero(mask.Description, i.Description),
+				Unit:            nonZero(mask.Unit, i.Unit),
+				Aggregation:     agg,
+				AttributeFilter: mask.AttributeFilter,
 			}, true
 		}
 		return Stream{}, false
diff --git a/sdk/metric/view_test.go b/sdk/metric/view_test.go
index 07f0c906cb8..0cf5646f243 100644
--- a/sdk/metric/view_test.go
+++ b/sdk/metric/view_test.go
@@ -15,8 +15,6 @@
 package metric // import "go.opentelemetry.io/otel/sdk/metric"
 
 import (
-	"fmt"
-	"regexp"
 	"testing"
 
 	"github.com/go-logr/logr"
@@ -404,18 +402,6 @@ func TestNewViewReplace(t *testing.T) {
 				}
 			},
 		},
-		{
-			name: "AttributeKeys",
-			mask: Stream{AllowAttributeKeys: []attribute.Key{"test"}},
-			want: func(i Instrument) Stream {
-				return Stream{
-					Name:               i.Name,
-					Description:        i.Description,
-					Unit:               i.Unit,
-					AllowAttributeKeys: []attribute.Key{"test"},
-				}
-			},
-		},
 		{
 			name: "Complete",
 			mask: Stream{
@@ -442,6 +428,23 @@ func TestNewViewReplace(t *testing.T) {
 			assert.Equal(t, test.want(completeIP), got)
 		})
 	}
+
+	// Go does not allow for the comparison of function values, even their
+	// addresses. Therefore, the AttributeFilter field needs an alternative
+	// testing strategy.
+	t.Run("AttributeFilter", func(t *testing.T) {
+		allowed := attribute.String("key", "val")
+		filter := func(kv attribute.KeyValue) bool {
+			return kv == allowed
+		}
+		mask := Stream{AttributeFilter: filter}
+		got, match := NewView(completeIP, mask)(completeIP)
+		require.True(t, match, "view did not match exact criteria")
+		require.NotNil(t, got.AttributeFilter, "AttributeFilter not set")
+		assert.True(t, got.AttributeFilter(allowed), "wrong AttributeFilter")
+		other := attribute.String("key", "other val")
+		assert.False(t, got.AttributeFilter(other), "wrong AttributeFilter")
+	})
 }
 
 type badAgg struct {
@@ -488,134 +491,3 @@ func TestNewViewMultiInstMatchErrorLogged(t *testing.T) {
 	})
 	assert.Contains(t, got, errMultiInst.Error())
 }
-
-func ExampleNewView() {
-	// Create a view that renames the "latency" instrument from the v0.34.0
-	// version of the "http" instrumentation library as "request.latency".
-	view := NewView(Instrument{
-		Name: "latency",
-		Scope: instrumentation.Scope{
-			Name:    "http",
-			Version: "v0.34.0",
-		},
-	}, Stream{Name: "request.latency"})
-
-	// The created view can then be registered with the OpenTelemetry metric
-	// SDK using the WithView option. Below is an example of how the view will
-	// function in the SDK for certain instruments.
-
-	stream, _ := view(Instrument{
-		Name:        "latency",
-		Description: "request latency",
-		Unit:        "ms",
-		Kind:        InstrumentKindCounter,
-		Scope: instrumentation.Scope{
-			Name:      "http",
-			Version:   "v0.34.0",
-			SchemaURL: "https://opentelemetry.io/schemas/1.0.0",
-		},
-	})
-	fmt.Println("name:", stream.Name)
-	fmt.Println("description:", stream.Description)
-	fmt.Println("unit:", stream.Unit)
-	// Output:
-	// name: request.latency
-	// description: request latency
-	// unit: ms
-}
-
-func ExampleNewView_drop() {
-	// Create a view that sets the drop aggregator for all instrumentation from
-	// the "db" library, effectively turning-off all instrumentation from that
-	// library.
-	view := NewView(
-		Instrument{Scope: instrumentation.Scope{Name: "db"}},
-		Stream{Aggregation: AggregationDrop{}},
-	)
-
-	// The created view can then be registered with the OpenTelemetry metric
-	// SDK using the WithView option. Below is an example of how the view will
-	// function in the SDK for certain instruments.
-
-	stream, _ := view(Instrument{
-		Name:  "queries",
-		Kind:  InstrumentKindCounter,
-		Scope: instrumentation.Scope{Name: "db", Version: "v0.4.0"},
-	})
-	fmt.Println("name:", stream.Name)
-	fmt.Printf("aggregation: %#v", stream.Aggregation)
-	// Output:
-	// name: queries
-	// aggregation: metric.AggregationDrop{}
-}
-
-func ExampleNewView_wildcard() {
-	// Create a view that sets unit to milliseconds for any instrument with a
-	// name suffix of ".ms".
-	view := NewView(
-		Instrument{Name: "*.ms"},
-		Stream{Unit: "ms"},
-	)
-
-	// The created view can then be registered with the OpenTelemetry metric
-	// SDK using the WithView option. Below is an example of how the view
-	// function in the SDK for certain instruments.
-
-	stream, _ := view(Instrument{
-		Name: "computation.time.ms",
-		Unit: "1",
-	})
-	fmt.Println("name:", stream.Name)
-	fmt.Println("unit:", stream.Unit)
-	// Output:
-	// name: computation.time.ms
-	// unit: ms
-}
-
-func ExampleView() {
-	// The NewView function provides convenient creation of common Views
-	// construction. However, it is limited in what it can create.
-	//
-	// When NewView is not able to provide the functionally needed, a custom
-	// View can be constructed directly. Here a custom View is constructed that
-	// uses Go's regular expression matching to ensure all data stream names
-	// have a suffix of the units it uses.
-
-	re := regexp.MustCompile(`[._](ms|byte)$`)
-	var view View = func(i Instrument) (Stream, bool) {
-		s := Stream{Name: i.Name, Description: i.Description, Unit: i.Unit}
-		// Any instrument that does not have a unit suffix defined, but has a
-		// dimensional unit defined, update the name with a unit suffix.
-		if re.MatchString(i.Name) {
-			return s, false
-		}
-		switch i.Unit {
-		case "ms":
-			s.Name += ".ms"
-		case "By":
-			s.Name += ".byte"
-		default:
-			return s, false
-		}
-		return s, true
-	}
-
-	// The created view can then be registered with the OpenTelemetry metric
-	// SDK using the WithView option. Below is an example of how the view will
-	// function in the SDK for certain instruments.
-
-	stream, _ := view(Instrument{
-		Name: "computation.time.ms",
-		Unit: "ms",
-	})
-	fmt.Println("name:", stream.Name)
-
-	stream, _ = view(Instrument{
-		Name: "heap.size",
-		Unit: "By",
-	})
-	fmt.Println("name:", stream.Name)
-	// Output:
-	// name: computation.time.ms
-	// name: heap.size.byte
-}
diff --git a/sdk/resource/auto.go b/sdk/resource/auto.go
index 324dd4baf24..4279013be88 100644
--- a/sdk/resource/auto.go
+++ b/sdk/resource/auto.go
@@ -21,12 +21,10 @@ import (
 	"strings"
 )
 
-var (
-	// ErrPartialResource is returned by a detector when complete source
-	// information for a Resource is unavailable or the source information
-	// contains invalid values that are omitted from the returned Resource.
-	ErrPartialResource = errors.New("partial resource")
-)
+// ErrPartialResource is returned by a detector when complete source
+// information for a Resource is unavailable or the source information
+// contains invalid values that are omitted from the returned Resource.
+var ErrPartialResource = errors.New("partial resource")
 
 // Detector detects OpenTelemetry resource information.
 type Detector interface {
diff --git a/sdk/resource/benchmark_test.go b/sdk/resource/benchmark_test.go
index ea72c5a2186..c43e7a6b8be 100644
--- a/sdk/resource/benchmark_test.go
+++ b/sdk/resource/benchmark_test.go
@@ -63,21 +63,27 @@ func benchmarkMergeResource(b *testing.B, size int) {
 func BenchmarkMergeResource_1(b *testing.B) {
 	benchmarkMergeResource(b, 1)
 }
+
 func BenchmarkMergeResource_2(b *testing.B) {
 	benchmarkMergeResource(b, 2)
 }
+
 func BenchmarkMergeResource_3(b *testing.B) {
 	benchmarkMergeResource(b, 3)
 }
+
 func BenchmarkMergeResource_4(b *testing.B) {
 	benchmarkMergeResource(b, 4)
 }
+
 func BenchmarkMergeResource_6(b *testing.B) {
 	benchmarkMergeResource(b, 6)
 }
+
 func BenchmarkMergeResource_8(b *testing.B) {
 	benchmarkMergeResource(b, 8)
 }
+
 func BenchmarkMergeResource_16(b *testing.B) {
 	benchmarkMergeResource(b, 16)
 }
diff --git a/sdk/resource/env.go b/sdk/resource/env.go
index a847c50622e..e29ae563a69 100644
--- a/sdk/resource/env.go
+++ b/sdk/resource/env.go
@@ -28,16 +28,14 @@ import (
 
 const (
 	// resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from.
-	resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES"
+	resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials
 
 	// svcNameKey is the environment variable name that Service Name information will be read from.
 	svcNameKey = "OTEL_SERVICE_NAME"
 )
 
-var (
-	// errMissingValue is returned when a resource value is missing.
-	errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource)
-)
+// errMissingValue is returned when a resource value is missing.
+var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource)
 
 // fromEnv is a Detector that implements the Detector and collects
 // resources from environment.  This Detector is included as a
@@ -91,7 +89,7 @@ func constructOTResources(s string) (*Resource, error) {
 			continue
 		}
 		key := strings.TrimSpace(k)
-		val, err := url.QueryUnescape(strings.TrimSpace(v))
+		val, err := url.PathUnescape(strings.TrimSpace(v))
 		if err != nil {
 			// Retain original value if decoding fails, otherwise it will be
 			// an empty string.
diff --git a/sdk/resource/env_test.go b/sdk/resource/env_test.go
index e47aaa5babd..a6754f74bc6 100644
--- a/sdk/resource/env_test.go
+++ b/sdk/resource/env_test.go
@@ -40,6 +40,19 @@ func TestDetectOnePair(t *testing.T) {
 	assert.Equal(t, NewSchemaless(attribute.String("key", "value")), res)
 }
 
+func TestDetectURIEncodingOnePair(t *testing.T) {
+	store, err := ottest.SetEnvVariables(map[string]string{
+		resourceAttrKey: "key=x+y+z?q=123",
+	})
+	require.NoError(t, err)
+	defer func() { require.NoError(t, store.Restore()) }()
+
+	detector := &fromEnv{}
+	res, err := detector.Detect(context.Background())
+	require.NoError(t, err)
+	assert.Equal(t, NewSchemaless(attribute.String("key", "x+y+z?q=123")), res)
+}
+
 func TestDetectMultiPairs(t *testing.T) {
 	store, err := ottest.SetEnvVariables(map[string]string{
 		"x":             "1",
@@ -60,6 +73,23 @@ func TestDetectMultiPairs(t *testing.T) {
 	), res)
 }
 
+func TestDetectURIEncodingMultiPairs(t *testing.T) {
+	store, err := ottest.SetEnvVariables(map[string]string{
+		"x":             "1",
+		resourceAttrKey: "key=x+y+z,namespace=localhost/test&verify",
+	})
+	require.NoError(t, err)
+	defer func() { require.NoError(t, store.Restore()) }()
+
+	detector := &fromEnv{}
+	res, err := detector.Detect(context.Background())
+	require.NoError(t, err)
+	assert.Equal(t, NewSchemaless(
+		attribute.String("key", "x+y+z"),
+		attribute.String("namespace", "localhost/test&verify"),
+	), res)
+}
+
 func TestEmpty(t *testing.T) {
 	store, err := ottest.SetEnvVariables(map[string]string{
 		resourceAttrKey: "   ",
diff --git a/sdk/resource/export_test.go b/sdk/resource/export_test.go
index 6c767e595c5..8003e3ec997 100644
--- a/sdk/resource/export_test.go
+++ b/sdk/resource/export_test.go
@@ -34,6 +34,4 @@ var (
 	RuntimeArch = runtimeArch
 )
 
-var (
-	MapRuntimeOSToSemconvOSType = mapRuntimeOSToSemconvOSType
-)
+var MapRuntimeOSToSemconvOSType = mapRuntimeOSToSemconvOSType
diff --git a/sdk/resource/os.go b/sdk/resource/os.go
index 84e1c585605..0cbd559739c 100644
--- a/sdk/resource/os.go
+++ b/sdk/resource/os.go
@@ -36,8 +36,10 @@ func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) {
 	osDescription = osDescriptionProvider
 }
 
-type osTypeDetector struct{}
-type osDescriptionDetector struct{}
+type (
+	osTypeDetector        struct{}
+	osDescriptionDetector struct{}
+)
 
 // Detect returns a *Resource that describes the operating system type the
 // service is running on.
@@ -56,7 +58,6 @@ func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
 // service is running on.
 func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
 	description, err := osDescription()
-
 	if err != nil {
 		return nil, err
 	}
diff --git a/sdk/resource/process.go b/sdk/resource/process.go
index e67ff29e26d..ecdd11dd762 100644
--- a/sdk/resource/process.go
+++ b/sdk/resource/process.go
@@ -25,14 +25,16 @@ import (
 	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
 )
 
-type pidProvider func() int
-type executablePathProvider func() (string, error)
-type commandArgsProvider func() []string
-type ownerProvider func() (*user.User, error)
-type runtimeNameProvider func() string
-type runtimeVersionProvider func() string
-type runtimeOSProvider func() string
-type runtimeArchProvider func() string
+type (
+	pidProvider            func() int
+	executablePathProvider func() (string, error)
+	commandArgsProvider    func() []string
+	ownerProvider          func() (*user.User, error)
+	runtimeNameProvider    func() string
+	runtimeVersionProvider func() string
+	runtimeOSProvider      func() string
+	runtimeArchProvider    func() string
+)
 
 var (
 	defaultPidProvider            pidProvider            = os.Getpid
@@ -108,14 +110,16 @@ func setUserProviders(ownerProvider ownerProvider) {
 	owner = ownerProvider
 }
 
-type processPIDDetector struct{}
-type processExecutableNameDetector struct{}
-type processExecutablePathDetector struct{}
-type processCommandArgsDetector struct{}
-type processOwnerDetector struct{}
-type processRuntimeNameDetector struct{}
-type processRuntimeVersionDetector struct{}
-type processRuntimeDescriptionDetector struct{}
+type (
+	processPIDDetector                struct{}
+	processExecutableNameDetector     struct{}
+	processExecutablePathDetector     struct{}
+	processCommandArgsDetector        struct{}
+	processOwnerDetector              struct{}
+	processRuntimeNameDetector        struct{}
+	processRuntimeVersionDetector     struct{}
+	processRuntimeDescriptionDetector struct{}
+)
 
 // Detect returns a *Resource that describes the process identifier (PID) of the
 // executing process.
diff --git a/sdk/trace/benchmark_test.go b/sdk/trace/benchmark_test.go
index b7dde8d96b0..e0172fd1d8c 100644
--- a/sdk/trace/benchmark_test.go
+++ b/sdk/trace/benchmark_test.go
@@ -285,6 +285,7 @@ func BenchmarkSpanWithEvents_WithStackTrace(b *testing.B) {
 		}
 	})
 }
+
 func BenchmarkSpanWithEvents_WithTimestamp(b *testing.B) {
 	traceBenchmark(b, "Benchmark Start With 4 Attributes", func(b *testing.B, t trace.Tracer) {
 		ctx := context.Background()
diff --git a/sdk/trace/provider.go b/sdk/trace/provider.go
index 0a018c14ded..7d46c4b48e5 100644
--- a/sdk/trace/provider.go
+++ b/sdk/trace/provider.go
@@ -25,6 +25,8 @@ import (
 	"go.opentelemetry.io/otel/sdk/instrumentation"
 	"go.opentelemetry.io/otel/sdk/resource"
 	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+	"go.opentelemetry.io/otel/trace/noop"
 )
 
 const (
@@ -73,6 +75,8 @@ func (cfg tracerProviderConfig) MarshalLog() interface{} {
 // TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to
 // instrumentation so it can trace operational flow through a system.
 type TracerProvider struct {
+	embedded.TracerProvider
+
 	mu             sync.Mutex
 	namedTracer    map[instrumentation.Scope]*tracer
 	spanProcessors atomic.Pointer[spanProcessorStates]
@@ -139,7 +143,7 @@ func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider {
 func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
 	// This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown().
 	if p.isShutdown.Load() {
-		return trace.NewNoopTracerProvider().Tracer(name, opts...)
+		return noop.NewTracerProvider().Tracer(name, opts...)
 	}
 	c := trace.NewTracerConfig(opts...)
 	if name == "" {
@@ -157,7 +161,7 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
 		// Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran
 		// after the first check above but before we acquired the mutex.
 		if p.isShutdown.Load() {
-			return trace.NewNoopTracerProvider().Tracer(name, opts...), true
+			return noop.NewTracerProvider().Tracer(name, opts...), true
 		}
 		t, ok := p.namedTracer[is]
 		if !ok {
diff --git a/sdk/trace/sampling.go b/sdk/trace/sampling.go
index 5ee9715d27b..a7bc125b9e8 100644
--- a/sdk/trace/sampling.go
+++ b/sdk/trace/sampling.go
@@ -158,9 +158,9 @@ func NeverSample() Sampler {
 	return alwaysOffSampler{}
 }
 
-// ParentBased returns a composite sampler which behaves differently,
+// ParentBased returns a sampler decorator which behaves differently,
 // based on the parent of the span. If the span has no parent,
-// the root(Sampler) is used to make sampling decision. If the span has
+// the decorated sampler is used to make sampling decision. If the span has
 // a parent, depending on whether the parent is remote and whether it
 // is sampled, one of the following samplers will apply:
 //   - remoteParentSampled(Sampler) (default: AlwaysOn)
diff --git a/sdk/trace/span.go b/sdk/trace/span.go
index 37cdd4a694a..36dbf67764b 100644
--- a/sdk/trace/span.go
+++ b/sdk/trace/span.go
@@ -32,6 +32,7 @@ import (
 	"go.opentelemetry.io/otel/sdk/resource"
 	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
 	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
 )
 
 // ReadOnlySpan allows reading information from the data structure underlying a
@@ -108,6 +109,8 @@ type ReadWriteSpan interface {
 // recordingSpan is an implementation of the OpenTelemetry Span API
 // representing the individual component of a trace that is sampled.
 type recordingSpan struct {
+	embedded.Span
+
 	// mu protects the contents of this span.
 	mu sync.Mutex
 
@@ -158,8 +161,10 @@ type recordingSpan struct {
 	tracer *tracer
 }
 
-var _ ReadWriteSpan = (*recordingSpan)(nil)
-var _ runtimeTracer = (*recordingSpan)(nil)
+var (
+	_ ReadWriteSpan = (*recordingSpan)(nil)
+	_ runtimeTracer = (*recordingSpan)(nil)
+)
 
 // SpanContext returns the SpanContext of this span.
 func (s *recordingSpan) SpanContext() trace.SpanContext {
@@ -772,6 +777,8 @@ func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context {
 // that wraps a SpanContext. It performs no operations other than to return
 // the wrapped SpanContext or TracerProvider that created it.
 type nonRecordingSpan struct {
+	embedded.Span
+
 	// tracer is the SDK tracer that created this span.
 	tracer *tracer
 	sc     trace.SpanContext
diff --git a/sdk/trace/span_processor_filter_example_test.go b/sdk/trace/span_processor_filter_example_test.go
index f5dd84e472a..e0ce9e0d86f 100644
--- a/sdk/trace/span_processor_filter_example_test.go
+++ b/sdk/trace/span_processor_filter_example_test.go
@@ -66,6 +66,7 @@ func (f InstrumentationBlacklist) Shutdown(ctx context.Context) error { return f
 func (f InstrumentationBlacklist) ForceFlush(ctx context.Context) error {
 	return f.Next.ForceFlush(ctx)
 }
+
 func (f InstrumentationBlacklist) OnEnd(s ReadOnlySpan) {
 	if f.Blacklist != nil && f.Blacklist[s.InstrumentationScope().Name] {
 		// Drop spans from this instrumentation
diff --git a/sdk/trace/trace_test.go b/sdk/trace/trace_test.go
index 56f53d4d2d0..5ad35314192 100644
--- a/sdk/trace/trace_test.go
+++ b/sdk/trace/trace_test.go
@@ -1407,7 +1407,8 @@ func TestWithResource(t *testing.T) {
 			name: "last resource wins",
 			options: []TracerProviderOption{
 				WithResource(resource.NewSchemaless(attribute.String("rk1", "vk1"), attribute.Int64("rk2", 5))),
-				WithResource(resource.NewSchemaless(attribute.String("rk3", "rv3"), attribute.Int64("rk4", 10)))},
+				WithResource(resource.NewSchemaless(attribute.String("rk3", "rv3"), attribute.Int64("rk4", 10))),
+			},
 			want: mergeResource(t, resource.Environment(), resource.NewSchemaless(attribute.String("rk3", "rv3"), attribute.Int64("rk4", 10))),
 		},
 		{
diff --git a/sdk/trace/tracer.go b/sdk/trace/tracer.go
index 85a71227f3f..301e1a7abcc 100644
--- a/sdk/trace/tracer.go
+++ b/sdk/trace/tracer.go
@@ -20,9 +20,12 @@ import (
 
 	"go.opentelemetry.io/otel/sdk/instrumentation"
 	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
 )
 
 type tracer struct {
+	embedded.Tracer
+
 	provider             *TracerProvider
 	instrumentationScope instrumentation.Scope
 }
diff --git a/sdk/trace/tracetest/span.go b/sdk/trace/tracetest/span.go
index bfe73de9c41..ae8eae8e8be 100644
--- a/sdk/trace/tracetest/span.go
+++ b/sdk/trace/tracetest/span.go
@@ -162,6 +162,7 @@ func (s spanSnapshot) Resource() *resource.Resource     { return s.resource }
 func (s spanSnapshot) InstrumentationScope() instrumentation.Scope {
 	return s.instrumentationScope
 }
+
 func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library {
 	return s.instrumentationScope
 }
diff --git a/sdk/version.go b/sdk/version.go
index dbef90b0df1..422d4c964b3 100644
--- a/sdk/version.go
+++ b/sdk/version.go
@@ -16,5 +16,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
 
 // Version is the current release version of the OpenTelemetry SDK in use.
 func Version() string {
-	return "1.16.0"
+	return "1.21.0"
 }
diff --git a/semconv/template.j2 b/semconv/template.j2
index 8d20a41d50d..984f2f01d85 100644
--- a/semconv/template.j2
+++ b/semconv/template.j2
@@ -31,7 +31,11 @@ It represents {% if brief[:2] == "A " or brief[:3] == "An " or brief[:4] == "The
 {%- endif -%}
 {%- endmacro -%}
 {%- macro keydoc(attr) -%}
+{%- if attr.stability|string() == "StabilityLevel.DEPRECATED" -%}
+{{ to_go_name(attr.fqn) }}Key is the attribute Key conforming to the "{{ attr.fqn }}" semantic conventions.
+{%- else -%}
 {{ to_go_name(attr.fqn) }}Key is the attribute Key conforming to the "{{ attr.fqn }}" semantic conventions. {{ it_reps(attr.brief) }}
+{%- endif %}
 {%- endmacro -%}
 {%- macro keydetails(attr) -%}
 {%- if attr.attr_type is string %}
@@ -51,18 +55,24 @@ RequirementLevel: Recommended
 RequirementLevel: Optional
 {%- endif %}
 {{ attr.stability |  replace("Level.", ": ") | capitalize }}
-{%- if attr.deprecated != None %}
-Deprecated: {{ attr.deprecated }}
-{%- endif %}
 {%- if attr.examples is iterable %}
 Examples: {{ attr.examples | pprint | trim("[]")  }}
 {%- endif %}
 {%- if attr.note %}
 Note: {{ attr.note }}
 {%- endif %}
+{%- if attr.stability|string() == "StabilityLevel.DEPRECATED" %}
+Deprecated: {{ attr.brief | replace("Deprecated, ", "") }}
+{%- endif %}
 {%- endmacro -%}
 {%- macro fndoc(attr) -%}
+{%- if attr.stability|string() == "StabilityLevel.DEPRECATED" -%}
+// {{ to_go_name(attr.fqn) }} returns an attribute KeyValue conforming to the "{{ attr.fqn }}" semantic conventions.
+
+Deprecated: {{ attr.brief | replace("Deprecated, ", "") }}
+{%- else -%}
 // {{ to_go_name(attr.fqn) }} returns an attribute KeyValue conforming to the "{{ attr.fqn }}" semantic conventions. {{ it_reps(attr.brief) }}
+{%- endif %}
 {%- endmacro -%}
 {%- macro to_go_func(type, name) -%}
 {%- if type == "string" -%}
@@ -124,6 +134,10 @@ const (
 var (
 {%- for val in attr.attr_type.members %}
     // {{ val.brief | to_doc_brief }}
+{%- if attr.stability|string() == "StabilityLevel.DEPRECATED" %}
+    //
+    // Deprecated: {{ attr.brief | replace("Deprecated, ", "") | wordwrap(76, break_long_words=false, break_on_hyphens=false, wrapstring="\n// ") }}
+{%- endif %}
     {{to_go_name("{}.{}".format(attr.fqn, val.member_id))}} = {{to_go_name(attr.fqn)}}Key.{{to_go_attr_type(attr.attr_type.enum_type, val.value)}}
 {%- endfor %}
 )
diff --git a/semconv/v1.21.0/doc.go b/semconv/v1.21.0/doc.go
index 7cf424855e9..0318b5ec48f 100644
--- a/semconv/v1.21.0/doc.go
+++ b/semconv/v1.21.0/doc.go
@@ -15,6 +15,6 @@
 // Package semconv implements OpenTelemetry semantic conventions.
 //
 // OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the conventions
-// as of the v1.21.0 version of the OpenTelemetry specification.
+// patterns for OpenTelemetry things. This package represents the v1.21.0
+// version of the OpenTelemetry semantic conventions.
 package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0"
diff --git a/semconv/v1.22.0/attribute_group.go b/semconv/v1.22.0/attribute_group.go
new file mode 100644
index 00000000000..88b436e0b27
--- /dev/null
+++ b/semconv/v1.22.0/attribute_group.go
@@ -0,0 +1,2490 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.22.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// These attributes may be used to describe the client in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API does not expose a
+// clear notion of client and server). This also covers UDP network
+// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3)
+// and DNS.
+const (
+	// ClientAddressKey is the attribute Key conforming to the "client.address"
+	// semantic conventions. It represents the client address - domain name if
+	// available without reverse DNS lookup, otherwise IP address or Unix
+	// domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.address` SHOULD represent the client address
+	// behind any intermediaries (e.g. proxies) if it's available.
+	ClientAddressKey = attribute.Key("client.address")
+
+	// ClientPortKey is the attribute Key conforming to the "client.port"
+	// semantic conventions. It represents the client port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 65123
+	// Note: When observed from the server side, and when communicating through
+	// an intermediary, `client.port` SHOULD represent the client port behind
+	// any intermediaries (e.g. proxies) if it's available.
+	ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup, otherwise IP address or
+// Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+	return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+	return ClientPortKey.Int(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+	// NetHostNameKey is the attribute Key conforming to the "net.host.name"
+	// semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'example.com'
+	// Deprecated: use `server.address`.
+	NetHostNameKey = attribute.Key("net.host.name")
+
+	// NetHostPortKey is the attribute Key conforming to the "net.host.port"
+	// semantic conventions.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 8080
+	// Deprecated: use `server.port`.
+	NetHostPortKey = attribute.Key("net.host.port")
+
+	// NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+	// semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'example.com'
+	// Deprecated: use `server.address` on client spans and `client.address` on
+	// server spans.
+	NetPeerNameKey = attribute.Key("net.peer.name")
+
+	// NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+	// semantic conventions.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 8080
+	// Deprecated: use `server.port` on client spans and `client.port` on
+	// server spans.
+	NetPeerPortKey = attribute.Key("net.peer.port")
+
+	// NetProtocolNameKey is the attribute Key conforming to the
+	// "net.protocol.name" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'amqp', 'http', 'mqtt'
+	// Deprecated: use `network.protocol.name`.
+	NetProtocolNameKey = attribute.Key("net.protocol.name")
+
+	// NetProtocolVersionKey is the attribute Key conforming to the
+	// "net.protocol.version" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '3.1.1'
+	// Deprecated: use `network.protocol.version`.
+	NetProtocolVersionKey = attribute.Key("net.protocol.version")
+
+	// NetSockFamilyKey is the attribute Key conforming to the
+	// "net.sock.family" semantic conventions.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Deprecated: use `network.transport` and `network.type`.
+	NetSockFamilyKey = attribute.Key("net.sock.family")
+
+	// NetSockHostAddrKey is the attribute Key conforming to the
+	// "net.sock.host.addr" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '/var/my.sock'
+	// Deprecated: use `network.local.address`.
+	NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+	// NetSockHostPortKey is the attribute Key conforming to the
+	// "net.sock.host.port" semantic conventions.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 8080
+	// Deprecated: use `network.local.port`.
+	NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+	// NetSockPeerAddrKey is the attribute Key conforming to the
+	// "net.sock.peer.addr" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '192.168.0.1'
+	// Deprecated: use `network.peer.address`.
+	NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+	// NetSockPeerNameKey is the attribute Key conforming to the
+	// "net.sock.peer.name" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '/var/my.sock'
+	// Deprecated: no replacement at this time.
+	NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+	// NetSockPeerPortKey is the attribute Key conforming to the
+	// "net.sock.peer.port" semantic conventions.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 65531
+	// Deprecated: use `network.peer.port`.
+	NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+	// NetTransportKey is the attribute Key conforming to the "net.transport"
+	// semantic conventions.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Deprecated: use `network.transport`.
+	NetTransportKey = attribute.Key("net.transport")
+)
+
+var (
+	// IPv4 address
+	//
+	// Deprecated: use `network.transport` and `network.type`.
+	NetSockFamilyInet = NetSockFamilyKey.String("inet")
+	// IPv6 address
+	//
+	// Deprecated: use `network.transport` and `network.type`.
+	NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+	// Unix domain socket path
+	//
+	// Deprecated: use `network.transport` and `network.type`.
+	NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+var (
+	// ip_tcp
+	//
+	// Deprecated: use `network.transport`.
+	NetTransportTCP = NetTransportKey.String("ip_tcp")
+	// ip_udp
+	//
+	// Deprecated: use `network.transport`.
+	NetTransportUDP = NetTransportKey.String("ip_udp")
+	// Named or anonymous pipe
+	//
+	// Deprecated: use `network.transport`.
+	NetTransportPipe = NetTransportKey.String("pipe")
+	// In-process communication
+	//
+	// Deprecated: use `network.transport`.
+	NetTransportInProc = NetTransportKey.String("inproc")
+	// Something else (non IP-based)
+	//
+	// Deprecated: use `network.transport`.
+	NetTransportOther = NetTransportKey.String("other")
+)
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions.
+//
+// Deprecated: use `server.address`.
+func NetHostName(val string) attribute.KeyValue {
+	return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions.
+//
+// Deprecated: use `server.port`.
+func NetHostPort(val int) attribute.KeyValue {
+	return NetHostPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions.
+//
+// Deprecated: use `server.address` on client spans and `client.address` on
+// server spans.
+func NetPeerName(val string) attribute.KeyValue {
+	return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions.
+//
+// Deprecated: use `server.port` on client spans and `client.port` on server
+// spans.
+func NetPeerPort(val int) attribute.KeyValue {
+	return NetPeerPortKey.Int(val)
+}
+
+// NetProtocolName returns an attribute KeyValue conforming to the
+// "net.protocol.name" semantic conventions.
+//
+// Deprecated: use `network.protocol.name`.
+func NetProtocolName(val string) attribute.KeyValue {
+	return NetProtocolNameKey.String(val)
+}
+
+// NetProtocolVersion returns an attribute KeyValue conforming to the
+// "net.protocol.version" semantic conventions.
+//
+// Deprecated: use `network.protocol.version`.
+func NetProtocolVersion(val string) attribute.KeyValue {
+	return NetProtocolVersionKey.String(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions.
+//
+// Deprecated: use `network.local.address`.
+func NetSockHostAddr(val string) attribute.KeyValue {
+	return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions.
+//
+// Deprecated: use `network.local.port`.
+func NetSockHostPort(val int) attribute.KeyValue {
+	return NetSockHostPortKey.Int(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions.
+//
+// Deprecated: use `network.peer.address`.
+func NetSockPeerAddr(val string) attribute.KeyValue {
+	return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions.
+//
+// Deprecated: no replacement at this time.
+func NetSockPeerName(val string) attribute.KeyValue {
+	return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions.
+//
+// Deprecated: use `network.peer.port`.
+func NetSockPeerPort(val int) attribute.KeyValue {
+	return NetSockPeerPortKey.Int(val)
+}
+
+// These attributes may be used to describe the receiver of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API does not expose a clear notion
+// of client and server.
+const (
+	// DestinationAddressKey is the attribute Key conforming to the
+	// "destination.address" semantic conventions. It represents the
+	// destination address - domain name if available without reverse DNS
+	// lookup, otherwise IP address or Unix domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the source side, and when communicating through
+	// an intermediary, `destination.address` SHOULD represent the destination
+	// address behind any intermediaries (e.g. proxies) if it's available.
+	DestinationAddressKey = attribute.Key("destination.address")
+
+	// DestinationPortKey is the attribute Key conforming to the
+	// "destination.port" semantic conventions. It represents the destination
+	// port number
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3389, 2888
+	DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup, otherwise IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+	return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number
+func DestinationPort(val int) attribute.KeyValue {
+	return DestinationPortKey.Int(val)
+}
+
+// The shared attributes used to report an error.
+const (
+	// ErrorTypeKey is the attribute Key conforming to the "error.type"
+	// semantic conventions. It represents the describes a class of error the
+	// operation ended with.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'timeout', 'java.net.UnknownHostException',
+	// 'server_certificate_invalid', '500'
+	// Note: The `error.type` SHOULD be predictable and SHOULD have low
+	// cardinality.
+	// Instrumentations SHOULD document the list of errors they report.
+	//
+	// The cardinality of `error.type` within one instrumentation library
+	// SHOULD be low, but
+	// telemetry consumers that aggregate data from multiple instrumentation
+	// libraries and applications
+	// should be prepared for `error.type` to have high cardinality at query
+	// time, when no
+	// additional filters are applied.
+	//
+	// If the operation has completed successfully, instrumentations SHOULD NOT
+	// set `error.type`.
+	//
+	// If a specific domain defines its own set of error codes (such as HTTP or
+	// gRPC status codes),
+	// it's RECOMMENDED to use a domain-specific attribute and also set
+	// `error.type` to capture
+	// all errors, regardless of whether they are defined within the
+	// domain-specific set or not.
+	ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+	// A fallback error value to be used when the instrumentation does not define a custom value for it
+	ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
+
+// Describes FaaS attributes.
+const (
+	// FaaSInvokedNameKey is the attribute Key conforming to the
+	// "faas.invoked_name" semantic conventions. It represents the name of the
+	// invoked function.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'my-function'
+	// Note: SHOULD be equal to the `faas.name` resource attribute of the
+	// invoked function.
+	FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+	// FaaSInvokedProviderKey is the attribute Key conforming to the
+	// "faas.invoked_provider" semantic conventions. It represents the cloud
+	// provider of the invoked function.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+	// invoked function.
+	FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+	// FaaSInvokedRegionKey is the attribute Key conforming to the
+	// "faas.invoked_region" semantic conventions. It represents the cloud
+	// region of the invoked function.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (For some cloud providers, like
+	// AWS or GCP, the region in which a function is hosted is essential to
+	// uniquely identify the function and also part of its endpoint. Since it's
+	// part of the endpoint being called, the region is always known to
+	// clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+	// If the region is unknown to the client or not required for identifying
+	// the invoked function, setting `faas.invoked_region` is optional.)
+	// Stability: experimental
+	// Examples: 'eu-central-1'
+	// Note: SHOULD be equal to the `cloud.region` resource attribute of the
+	// invoked function.
+	FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+	// FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+	// semantic conventions. It represents the type of the trigger which caused
+	// this function invocation.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	FaaSTriggerKey = attribute.Key("faas.trigger")
+)
+
+var (
+	// Alibaba Cloud
+	FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+	// Microsoft Azure
+	FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+	// Google Cloud Platform
+	FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+	// Tencent Cloud
+	FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+var (
+	// A response to some data source operation such as a database or filesystem read/write
+	FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+	// To provide an answer to an inbound HTTP request
+	FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+	// A function is set to be executed when messages are sent to a messaging system
+	FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+	// A function is scheduled to be executed regularly
+	FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+	// If none of the others apply
+	FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+	return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+	return FaaSInvokedRegionKey.String(val)
+}
+
+// Attributes for Events represented using Log Records.
+const (
+	// EventDomainKey is the attribute Key conforming to the "event.domain"
+	// semantic conventions. It represents the domain identifies the business
+	// context for the events.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Note: Events across different domains may have same `event.name`, yet be
+	// unrelated events.
+	EventDomainKey = attribute.Key("event.domain")
+
+	// EventNameKey is the attribute Key conforming to the "event.name"
+	// semantic conventions. It represents the name identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'click', 'exception'
+	EventNameKey = attribute.Key("event.name")
+)
+
+var (
+	// Events from browser apps
+	EventDomainBrowser = EventDomainKey.String("browser")
+	// Events from mobile apps
+	EventDomainDevice = EventDomainKey.String("device")
+	// Events from Kubernetes
+	EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name identifies the event.
+func EventName(val string) attribute.KeyValue {
+	return EventNameKey.String(val)
+}
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+	// LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+	// semantic conventions. It represents a unique identifier for the Log
+	// Record.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+	// Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means, that two
+	// distinguishable log records MUST have different values.
+	// The id MAY be an [Universally Unique Lexicographically Sortable
+	// Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+	// (e.g. UUID) may be used as needed.
+	LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+	return LogRecordUIDKey.String(val)
+}
+
+// Describes Log attributes
+const (
+	// LogIostreamKey is the attribute Key conforming to the "log.iostream"
+	// semantic conventions. It represents the stream associated with the log.
+	// See below for a list of well-known values.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+	// Logs from stdout stream
+	LogIostreamStdout = LogIostreamKey.String("stdout")
+	// Events from stderr stream
+	LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// A file to which log was emitted.
+const (
+	// LogFileNameKey is the attribute Key conforming to the "log.file.name"
+	// semantic conventions. It represents the basename of the file.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'audit.log'
+	LogFileNameKey = attribute.Key("log.file.name")
+
+	// LogFileNameResolvedKey is the attribute Key conforming to the
+	// "log.file.name_resolved" semantic conventions. It represents the
+	// basename of the file, with symlinks resolved.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'uuid.log'
+	LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+	// LogFilePathKey is the attribute Key conforming to the "log.file.path"
+	// semantic conventions. It represents the full path to the file.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/var/log/mysql/audit.log'
+	LogFilePathKey = attribute.Key("log.file.path")
+
+	// LogFilePathResolvedKey is the attribute Key conforming to the
+	// "log.file.path_resolved" semantic conventions. It represents the full
+	// path to the file, with symlinks resolved.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/var/lib/docker/uuid.log'
+	LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the
+// "log.file.name" semantic conventions. It represents the basename of the
+// file.
+func LogFileName(val string) attribute.KeyValue {
+	return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+	return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+	return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+	return LogFilePathResolvedKey.String(val)
+}
+
+// Describes Database attributes
+const (
+	// PoolNameKey is the attribute Key conforming to the "pool.name" semantic
+	// conventions. It represents the name of the connection pool; unique
+	// within the instrumented application. In case the connection pool
+	// implementation does not provide a name, then the
+	// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes)
+	// should be used
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'myDataSource'
+	PoolNameKey = attribute.Key("pool.name")
+
+	// StateKey is the attribute Key conforming to the "state" semantic
+	// conventions. It represents the state of a connection in the pool
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'idle'
+	StateKey = attribute.Key("state")
+)
+
+var (
+	// idle
+	StateIdle = StateKey.String("idle")
+	// used
+	StateUsed = StateKey.String("used")
+)
+
+// PoolName returns an attribute KeyValue conforming to the "pool.name"
+// semantic conventions. It represents the name of the connection pool; unique
+// within the instrumented application. In case the connection pool
+// implementation does not provide a name, then the
+// [db.connection_string](/docs/database/database-spans.md#connection-level-attributes)
+// should be used
+func PoolName(val string) attribute.KeyValue {
+	return PoolNameKey.String(val)
+}
+
+// Describes JVM buffer metric attributes.
+const (
+	// JvmBufferPoolNameKey is the attribute Key conforming to the
+	// "jvm.buffer.pool.name" semantic conventions. It represents the name of
+	// the buffer pool.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'mapped', 'direct'
+	// Note: Pool names are generally obtained via
+	// [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
+	JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+	return JvmBufferPoolNameKey.String(val)
+}
+
+// Describes JVM memory metric attributes.
+const (
+	// JvmMemoryPoolNameKey is the attribute Key conforming to the
+	// "jvm.memory.pool.name" semantic conventions. It represents the name of
+	// the memory pool.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+	// Note: Pool names are generally obtained via
+	// [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+	JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
+
+	// JvmMemoryTypeKey is the attribute Key conforming to the
+	// "jvm.memory.type" semantic conventions. It represents the type of
+	// memory.
+	//
+	// Type: Enum
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'heap', 'non_heap'
+	JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+)
+
+var (
+	// Heap memory
+	JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+	// Non-heap memory
+	JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+	return JvmMemoryPoolNameKey.String(val)
+}
+
+// Describes System metric attributes
+const (
+	// SystemDeviceKey is the attribute Key conforming to the "system.device"
+	// semantic conventions. It represents the device identifier
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '(identifier)'
+	SystemDeviceKey = attribute.Key("system.device")
+)
+
+// SystemDevice returns an attribute KeyValue conforming to the
+// "system.device" semantic conventions. It represents the device identifier
+func SystemDevice(val string) attribute.KeyValue {
+	return SystemDeviceKey.String(val)
+}
+
+// Describes System CPU metric attributes
+const (
+	// SystemCPULogicalNumberKey is the attribute Key conforming to the
+	// "system.cpu.logical_number" semantic conventions. It represents the
+	// logical CPU number [0..n-1]
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1
+	SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+	// SystemCPUStateKey is the attribute Key conforming to the
+	// "system.cpu.state" semantic conventions. It represents the state of the
+	// CPU
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'idle', 'interrupt'
+	SystemCPUStateKey = attribute.Key("system.cpu.state")
+)
+
+var (
+	// user
+	SystemCPUStateUser = SystemCPUStateKey.String("user")
+	// system
+	SystemCPUStateSystem = SystemCPUStateKey.String("system")
+	// nice
+	SystemCPUStateNice = SystemCPUStateKey.String("nice")
+	// idle
+	SystemCPUStateIdle = SystemCPUStateKey.String("idle")
+	// iowait
+	SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
+	// interrupt
+	SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
+	// steal
+	SystemCPUStateSteal = SystemCPUStateKey.String("steal")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the logical
+// CPU number [0..n-1]
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+	return SystemCPULogicalNumberKey.Int(val)
+}
+
+// Describes System Memory metric attributes
+const (
+	// SystemMemoryStateKey is the attribute Key conforming to the
+	// "system.memory.state" semantic conventions. It represents the memory
+	// state
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'free', 'cached'
+	SystemMemoryStateKey = attribute.Key("system.memory.state")
+)
+
+var (
+	// total
+	SystemMemoryStateTotal = SystemMemoryStateKey.String("total")
+	// used
+	SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+	// free
+	SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+	// shared
+	SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
+	// buffers
+	SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+	// cached
+	SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Describes System Memory Paging metric attributes
+const (
+	// SystemPagingDirectionKey is the attribute Key conforming to the
+	// "system.paging.direction" semantic conventions. It represents the paging
+	// access direction
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'in'
+	SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+	// SystemPagingStateKey is the attribute Key conforming to the
+	// "system.paging.state" semantic conventions. It represents the memory
+	// paging state
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'free'
+	SystemPagingStateKey = attribute.Key("system.paging.state")
+
+	// SystemPagingTypeKey is the attribute Key conforming to the
+	// "system.paging.type" semantic conventions. It represents the memory
+	// paging type
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'minor'
+	SystemPagingTypeKey = attribute.Key("system.paging.type")
+)
+
+var (
+	// in
+	SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+	// out
+	SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+var (
+	// used
+	SystemPagingStateUsed = SystemPagingStateKey.String("used")
+	// free
+	SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+var (
+	// major
+	SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
+	// minor
+	SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
+)
+
+// Describes System Disk metric attributes
+const (
+	// SystemDiskDirectionKey is the attribute Key conforming to the
+	// "system.disk.direction" semantic conventions. It represents the disk
+	// operation direction
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'read'
+	SystemDiskDirectionKey = attribute.Key("system.disk.direction")
+)
+
+var (
+	// read
+	SystemDiskDirectionRead = SystemDiskDirectionKey.String("read")
+	// write
+	SystemDiskDirectionWrite = SystemDiskDirectionKey.String("write")
+)
+
+// Describes Filesystem metric attributes
+const (
+	// SystemFilesystemModeKey is the attribute Key conforming to the
+	// "system.filesystem.mode" semantic conventions. It represents the
+	// filesystem mode
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'rw, ro'
+	SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+	// SystemFilesystemMountpointKey is the attribute Key conforming to the
+	// "system.filesystem.mountpoint" semantic conventions. It represents the
+	// filesystem mount path
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/mnt/data'
+	SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+	// SystemFilesystemStateKey is the attribute Key conforming to the
+	// "system.filesystem.state" semantic conventions. It represents the
+	// filesystem state
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'used'
+	SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+	// SystemFilesystemTypeKey is the attribute Key conforming to the
+	// "system.filesystem.type" semantic conventions. It represents the
+	// filesystem type
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ext4'
+	SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+)
+
+var (
+	// used
+	SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+	// free
+	SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+	// reserved
+	SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+var (
+	// fat32
+	SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+	// exfat
+	SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+	// ntfs
+	SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+	// refs
+	SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+	// hfsplus
+	SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+	// ext4
+	SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode
+func SystemFilesystemMode(val string) attribute.KeyValue {
+	return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+	return SystemFilesystemMountpointKey.String(val)
+}
+
+// Describes Network metric attributes
+const (
+	// SystemNetworkDirectionKey is the attribute Key conforming to the
+	// "system.network.direction" semantic conventions. It represents the network IO operation direction.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'transmit'
+	SystemNetworkDirectionKey = attribute.Key("system.network.direction")
+
+	// SystemNetworkStateKey is the attribute Key conforming to the
+	// "system.network.state" semantic conventions. It represents the network
+	// connection state. A stateless protocol MUST NOT set this attribute.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'close_wait'
+	SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+	// transmit
+	SystemNetworkDirectionTransmit = SystemNetworkDirectionKey.String("transmit")
+	// receive
+	SystemNetworkDirectionReceive = SystemNetworkDirectionKey.String("receive")
+)
+
+var (
+	// close
+	SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+	// close_wait
+	SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+	// closing
+	SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+	// delete
+	SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+	// established
+	SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+	// fin_wait_1
+	SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+	// fin_wait_2
+	SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+	// last_ack
+	SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+	// listen
+	SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+	// syn_recv
+	SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+	// syn_sent
+	SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+	// time_wait
+	SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process metric attributes
+const (
+	// SystemProcessesStatusKey is the attribute Key conforming to the
+	// "system.processes.status" semantic conventions. It represents the
+	// process state, e.g., [Linux Process State
+	// Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'running'
+	SystemProcessesStatusKey = attribute.Key("system.processes.status")
+)
+
+var (
+	// running
+	SystemProcessesStatusRunning = SystemProcessesStatusKey.String("running")
+	// sleeping
+	SystemProcessesStatusSleeping = SystemProcessesStatusKey.String("sleeping")
+	// stopped
+	SystemProcessesStatusStopped = SystemProcessesStatusKey.String("stopped")
+	// defunct
+	SystemProcessesStatusDefunct = SystemProcessesStatusKey.String("defunct")
+)
+
+// These attributes may be used for any network related operation.
+const (
+	// NetworkLocalAddressKey is the attribute Key conforming to the
+	// "network.local.address" semantic conventions. It represents the local
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+	// NetworkLocalPortKey is the attribute Key conforming to the
+	// "network.local.port" semantic conventions. It represents the local port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 65123
+	NetworkLocalPortKey = attribute.Key("network.local.port")
+
+	// NetworkPeerAddressKey is the attribute Key conforming to the
+	// "network.peer.address" semantic conventions. It represents the peer
+	// address of the network connection - IP address or Unix domain socket
+	// name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '10.1.2.80', '/tmp/my.sock'
+	NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+	// NetworkPeerPortKey is the attribute Key conforming to the
+	// "network.peer.port" semantic conventions. It represents the peer port
+	// number of the network connection.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 65123
+	NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+	// NetworkProtocolNameKey is the attribute Key conforming to the
+	// "network.protocol.name" semantic conventions. It represents the [OSI
+	// application layer](https://osi-model.com/application-layer/) or non-OSI
+	// equivalent.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'amqp', 'http', 'mqtt'
+	// Note: The value SHOULD be normalized to lowercase.
+	NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+	// NetworkProtocolVersionKey is the attribute Key conforming to the
+	// "network.protocol.version" semantic conventions. It represents the
+	// version of the protocol specified in `network.protocol.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '3.1.1'
+	// Note: `network.protocol.version` refers to the version of the protocol
+	// used and might be different from the protocol client's version. If the
+	// HTTP client used has a version of `0.27.2`, but sends HTTP version
+	// `1.1`, this attribute should be set to `1.1`.
+	NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+	// NetworkTransportKey is the attribute Key conforming to the
+	// "network.transport" semantic conventions. It represents the [OSI
+	// transport layer](https://osi-model.com/transport-layer/) or
+	// [inter-process communication
+	// method](https://en.wikipedia.org/wiki/Inter-process_communication).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'tcp', 'udp'
+	// Note: The value SHOULD be normalized to lowercase.
+	//
+	// Consider always setting the transport when setting a port number, since
+	// a port number is ambiguous without knowing the transport, for example
+	// different processes could be listening on TCP port 12345 and UDP port
+	// 12345.
+	NetworkTransportKey = attribute.Key("network.transport")
+
+	// NetworkTypeKey is the attribute Key conforming to the "network.type"
+	// semantic conventions. It represents the [OSI network
+	// layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ipv4', 'ipv6'
+	// Note: The value SHOULD be normalized to lowercase.
+	NetworkTypeKey = attribute.Key("network.type")
+)
+
+var (
+	// TCP
+	NetworkTransportTCP = NetworkTransportKey.String("tcp")
+	// UDP
+	NetworkTransportUDP = NetworkTransportKey.String("udp")
+	// Named or anonymous pipe. See note below
+	NetworkTransportPipe = NetworkTransportKey.String("pipe")
+	// Unix domain socket
+	NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+	// IPv4
+	NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+	// IPv6
+	NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local
+// address of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue {
+	return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port
+// number of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+	return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+	return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+	return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// application layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent.
+func NetworkProtocolName(val string) attribute.KeyValue {
+	return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the version
+// of the protocol specified in `network.protocol.name`.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+	return NetworkProtocolVersionKey.String(val)
+}
+
+// These attributes may be used for any network related operation.
+const (
+	// NetworkCarrierIccKey is the attribute Key conforming to the
+	// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+	// alpha-2 2-character country code associated with the mobile carrier
+	// network.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'DE'
+	NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+
+	// NetworkCarrierMccKey is the attribute Key conforming to the
+	// "network.carrier.mcc" semantic conventions. It represents the mobile
+	// carrier country code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '310'
+	NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+	// NetworkCarrierMncKey is the attribute Key conforming to the
+	// "network.carrier.mnc" semantic conventions. It represents the mobile
+	// carrier network code.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '001'
+	NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+	// NetworkCarrierNameKey is the attribute Key conforming to the
+	// "network.carrier.name" semantic conventions. It represents the name of
+	// the mobile carrier.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'sprint'
+	NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+	// NetworkConnectionSubtypeKey is the attribute Key conforming to the
+	// "network.connection.subtype" semantic conventions. It describes more
+	// details regarding the connection.type. It may be the
+	// type of cell technology connection, but it could be used for describing
+	// details about a wifi connection.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'LTE'
+	NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+	// NetworkConnectionTypeKey is the attribute Key conforming to the
+	// "network.connection.type" semantic conventions. It represents the
+	// internet connection type.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'wifi'
+	NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+)
+
+var (
+	// GPRS
+	NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+	// EDGE
+	NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+	// UMTS
+	NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+	// CDMA
+	NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+	// EVDO Rel. 0
+	NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+	// EVDO Rev. A
+	NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+	// CDMA2000 1XRTT
+	NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+	// HSDPA
+	NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+	// HSUPA
+	NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+	// HSPA
+	NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+	// IDEN
+	NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+	// EVDO Rev. B
+	NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+	// LTE
+	NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+	// EHRPD
+	NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+	// HSPAP
+	NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+	// GSM
+	NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+	// TD-SCDMA
+	NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+	// IWLAN
+	NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+	// 5G NR (New Radio)
+	NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+	// 5G NRNSA (New Radio Non-Standalone)
+	NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+	// LTE CA
+	NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+var (
+	// wifi
+	NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+	// wired
+	NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+	// cell
+	NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+	// unavailable
+	NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+	// unknown
+	NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+// NetworkCarrierIcc returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierIcc(val string) attribute.KeyValue {
+	return NetworkCarrierIccKey.String(val)
+}
+
+// NetworkCarrierMcc returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMcc(val string) attribute.KeyValue {
+	return NetworkCarrierMccKey.String(val)
+}
+
+// NetworkCarrierMnc returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMnc(val string) attribute.KeyValue {
+	return NetworkCarrierMncKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+	return NetworkCarrierNameKey.String(val)
+}
+
+// Describes deprecated HTTP attributes.
+const (
+	// HTTPMethodKey is the attribute Key conforming to the "http.method"
+	// semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'GET', 'POST', 'HEAD'
+	// Deprecated: use `http.request.method` instead.
+	HTTPMethodKey = attribute.Key("http.method")
+
+	// HTTPRequestContentLengthKey is the attribute Key conforming to the
+	// "http.request_content_length" semantic conventions.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 3495
+	// Deprecated: use `http.request.body.size` instead.
+	HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+	// HTTPResponseContentLengthKey is the attribute Key conforming to the
+	// "http.response_content_length" semantic conventions.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 3495
+	// Deprecated: use `http.response.body.size` instead.
+	HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+
+	// HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+	// semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'http', 'https'
+	// Deprecated: use `url.scheme` instead.
+	HTTPSchemeKey = attribute.Key("http.scheme")
+
+	// HTTPStatusCodeKey is the attribute Key conforming to the
+	// "http.status_code" semantic conventions.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 200
+	// Deprecated: use `http.response.status_code` instead.
+	HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+	// HTTPTargetKey is the attribute Key conforming to the "http.target"
+	// semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '/search?q=OpenTelemetry#SemConv'
+	// Deprecated: use `url.path` and `url.query` instead.
+	HTTPTargetKey = attribute.Key("http.target")
+
+	// HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+	// conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+	// Deprecated: use `url.full` instead.
+	HTTPURLKey = attribute.Key("http.url")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions.
+//
+// Deprecated: use `http.request.method` instead.
+func HTTPMethod(val string) attribute.KeyValue {
+	return HTTPMethodKey.String(val)
+}
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions.
+//
+// Deprecated: use `http.request.body.size` instead.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+	return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions.
+//
+// Deprecated: use `http.response.body.size` instead.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+	return HTTPResponseContentLengthKey.Int(val)
+}
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions.
+//
+// Deprecated: use `url.scheme` instead.
+func HTTPScheme(val string) attribute.KeyValue {
+	return HTTPSchemeKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions.
+//
+// Deprecated: use `http.response.status_code` instead.
+func HTTPStatusCode(val int) attribute.KeyValue {
+	return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions.
+//
+// Deprecated: use `url.path` and `url.query` instead.
+func HTTPTarget(val string) attribute.KeyValue {
+	return HTTPTargetKey.String(val)
+}
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions.
+//
+// Deprecated: use `url.full` instead.
+func HTTPURL(val string) attribute.KeyValue {
+	return HTTPURLKey.String(val)
+}
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+	// HTTPRequestBodySizeKey is the attribute Key conforming to the
+	// "http.request.body.size" semantic conventions. It represents the size of
+	// the request payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3495
+	HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+	// HTTPRequestMethodKey is the attribute Key conforming to the
+	// "http.request.method" semantic conventions. It represents the HTTP
+	// request method.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'GET', 'POST', 'HEAD'
+	// Note: HTTP request method value SHOULD be "known" to the
+	// instrumentation.
+	// By default, this convention defines "known" methods as the ones listed
+	// in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+	// and the PATCH method defined in
+	// [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+	//
+	// If the HTTP request method is not known to instrumentation, it MUST set
+	// the `http.request.method` attribute to `_OTHER`.
+	//
+	// If the HTTP instrumentation could end up converting valid HTTP request
+	// methods to `_OTHER`, then it MUST provide a way to override
+	// the list of known HTTP methods. If this override is done via environment
+	// variable, then the environment variable MUST be named
+	// OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+	// list of case-sensitive known HTTP methods
+	// (this list MUST be a full override of the default known method, it is
+	// not a list of known methods in addition to the defaults).
+	//
+	// HTTP method names are case-sensitive and `http.request.method` attribute
+	// value MUST match a known HTTP method name exactly.
+	// Instrumentations for specific web frameworks that consider HTTP methods
+	// to be case insensitive, SHOULD populate a canonical equivalent.
+	// Tracing instrumentations that do so, MUST also set
+	// `http.request.method_original` to the original value.
+	HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+	// HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+	// "http.request.method_original" semantic conventions. It represents the
+	// original HTTP method sent by the client in the request line.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'GeT', 'ACL', 'foo'
+	HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+	// HTTPResendCountKey is the attribute Key conforming to the
+	// "http.resend_count" semantic conventions. It represents the ordinal
+	// number of request resending attempt (for any reason, including
+	// redirects).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3
+	// Note: The resend count SHOULD be updated each time an HTTP request gets
+	// resent by the client, regardless of what was the cause of the resending
+	// (e.g. redirection, authorization failure, 503 Server Unavailable,
+	// network issues, or any other).
+	HTTPResendCountKey = attribute.Key("http.resend_count")
+
+	// HTTPResponseBodySizeKey is the attribute Key conforming to the
+	// "http.response.body.size" semantic conventions. It represents the size
+	// of the response payload body in bytes. This is the number of bytes
+	// transferred excluding headers and is often, but not always, present as
+	// the
+	// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+	// header. For requests using transport encoding, this should be the
+	// compressed size.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3495
+	HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+	// HTTPResponseStatusCodeKey is the attribute Key conforming to the
+	// "http.response.status_code" semantic conventions. It represents the
+	// [HTTP response status
+	// code](https://tools.ietf.org/html/rfc7231#section-6).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 200
+	HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+	// HTTPRouteKey is the attribute Key conforming to the "http.route"
+	// semantic conventions. It represents the matched route (path template in
+	// the format used by the respective server framework). See note below
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+	// Note: MUST NOT be populated when this is not supported by the HTTP
+	// server framework as the route attribute should have low-cardinality and
+	// the URI path can NOT substitute it.
+	// SHOULD include the [application
+	// root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+	HTTPRouteKey = attribute.Key("http.route")
+)
+
+// Well-known values for the "http.request.method" attribute.
+var (
+	// CONNECT method
+	HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+	// DELETE method
+	HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+	// GET method
+	HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+	// HEAD method
+	HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+	// OPTIONS method
+	HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+	// PATCH method
+	HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+	// POST method
+	HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+	// PUT method
+	HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+	// TRACE method
+	HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+	// Any HTTP method that the instrumentation has no prior knowledge of
+	HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
+
+// HTTPRequestBodySize returns an attribute KeyValue for the
+// "http.request.body.size" semantic convention: the size of the request
+// payload body in bytes, excluding headers. This is often, but not always,
+// the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header value; for requests using transport encoding it should be the
+// compressed size.
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+	kv := HTTPRequestBodySizeKey.Int(val)
+	return kv
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue for the
+// "http.request.method_original" semantic convention: the original HTTP
+// method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+	kv := HTTPRequestMethodOriginalKey.String(val)
+	return kv
+}
+
+// HTTPResendCount returns an attribute KeyValue for the "http.resend_count"
+// semantic convention: the ordinal number of a request resending attempt,
+// for any reason including redirects.
+func HTTPResendCount(val int) attribute.KeyValue {
+	kv := HTTPResendCountKey.Int(val)
+	return kv
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue for the
+// "http.response.body.size" semantic convention: the size of the response
+// payload body in bytes, excluding headers. This is often, but not always,
+// the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header value; for requests using transport encoding it should be the
+// compressed size.
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+	kv := HTTPResponseBodySizeKey.Int(val)
+	return kv
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue for the
+// "http.response.status_code" semantic convention: the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+	kv := HTTPResponseStatusCodeKey.Int(val)
+	return kv
+}
+
+// HTTPRoute returns an attribute KeyValue for the "http.route" semantic
+// convention: the matched route, as a path template in the format used by
+// the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+	kv := HTTPRouteKey.String(val)
+	return kv
+}
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API does not expose a
+// clear notion of client and server). This also covers UDP network
+// interactions where one side initiates the interaction, e.g. QUIC (HTTP/3)
+// and DNS.
+const (
+	// ServerAddressKey is the attribute Key conforming to the "server.address"
+	// semantic conventions. It represents the server address: the domain name
+	// if available without reverse DNS lookup, otherwise the IP address or
+	// Unix domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the client side, and when communicating through
+	// an intermediary, `server.address` SHOULD represent
+	// the server address behind any intermediaries (e.g. proxies) if it's
+	// available.
+	ServerAddressKey = attribute.Key("server.address")
+
+	// ServerPortKey is the attribute Key conforming to the "server.port"
+	// semantic conventions. It represents the server port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 80, 8080, 443
+	// Note: When observed from the client side, and when communicating through
+	// an intermediary, `server.port` SHOULD represent the server port behind
+	// any intermediaries (e.g. proxies) if it's available.
+	ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue for the "server.address"
+// semantic convention: the server address — the domain name if available
+// without reverse DNS lookup, otherwise the IP address or Unix domain
+// socket name.
+func ServerAddress(val string) attribute.KeyValue {
+	kv := ServerAddressKey.String(val)
+	return kv
+}
+
+// ServerPort returns an attribute KeyValue for the "server.port" semantic
+// convention: the server port number.
+func ServerPort(val int) attribute.KeyValue {
+	kv := ServerPortKey.Int(val)
+	return kv
+}
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+const (
+	// SessionIDKey is the attribute Key conforming to the "session.id"
+	// semantic conventions. It represents a unique ID to identify a session.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '00112233-4455-6677-8899-aabbccddeeff'
+	SessionIDKey = attribute.Key("session.id")
+)
+
+// SessionID returns an attribute KeyValue for the "session.id" semantic
+// convention: a unique ID identifying a session.
+func SessionID(val string) attribute.KeyValue {
+	kv := SessionIDKey.String(val)
+	return kv
+}
+
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API does not expose a clear notion
+// of client and server.
+const (
+	// SourceAddressKey is the attribute Key conforming to the "source.address"
+	// semantic conventions. It represents the source address - domain name if
+	// available without reverse DNS lookup, otherwise IP address or Unix
+	// domain socket name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
+	// Note: When observed from the destination side, and when communicating
+	// through an intermediary, `source.address` SHOULD represent the source
+	// address behind any intermediaries (e.g. proxies) if it's available.
+	SourceAddressKey = attribute.Key("source.address")
+
+	// SourcePortKey is the attribute Key conforming to the "source.port"
+	// semantic conventions. It represents the source port number.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3389, 2888
+	SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue for the "source.address"
+// semantic convention: the source address — the domain name if available
+// without reverse DNS lookup, otherwise the IP address or Unix domain
+// socket name.
+func SourceAddress(val string) attribute.KeyValue {
+	kv := SourceAddressKey.String(val)
+	return kv
+}
+
+// SourcePort returns an attribute KeyValue for the "source.port" semantic
+// convention: the source port number.
+func SourcePort(val int) attribute.KeyValue {
+	kv := SourcePortKey.Int(val)
+	return kv
+}
+
+// Semantic convention describing per-message attributes populated on messaging
+// spans or links.
+const (
+	// MessagingMessageBodySizeKey is the attribute Key conforming to the
+	// "messaging.message.body.size" semantic conventions. It represents the
+	// size of the message body in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1439
+	// Note: This can refer to either the compressed or the uncompressed body
+	// size. If both sizes are known, the uncompressed
+	// body size should be used.
+	MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+	// MessagingMessageConversationIDKey is the attribute Key conforming to the
+	// "messaging.message.conversation_id" semantic conventions. It represents
+	// the [conversation ID](#conversations) identifying the conversation to
+	// which the message belongs, represented as a string. Sometimes called
+	// "Correlation ID".
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyConversationID'
+	MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+	// MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+	// "messaging.message.envelope.size" semantic conventions. It represents
+	// the size of the message body and metadata in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2738
+	// Note: This can refer to either the compressed or the uncompressed size.
+	// If both sizes are known, the uncompressed
+	// size should be used.
+	MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+	// MessagingMessageIDKey is the attribute Key conforming to the
+	// "messaging.message.id" semantic conventions. It represents a value used
+	// by the messaging system as an identifier for the message, represented as
+	// a string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+	MessagingMessageIDKey = attribute.Key("messaging.message.id")
+)
+
+// MessagingMessageBodySize returns an attribute KeyValue for the
+// "messaging.message.body.size" semantic convention: the size of the
+// message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+	kv := MessagingMessageBodySizeKey.Int(val)
+	return kv
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue for the
+// "messaging.message.conversation_id" semantic convention: the
+// [conversation ID](#conversations) identifying the conversation the
+// message belongs to, as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+	kv := MessagingMessageConversationIDKey.String(val)
+	return kv
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue for the
+// "messaging.message.envelope.size" semantic convention: the size of the
+// message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+	kv := MessagingMessageEnvelopeSizeKey.Int(val)
+	return kv
+}
+
+// MessagingMessageID returns an attribute KeyValue for the
+// "messaging.message.id" semantic convention: a value used by the messaging
+// system as an identifier for the message, as a string.
+func MessagingMessageID(val string) attribute.KeyValue {
+	kv := MessagingMessageIDKey.String(val)
+	return kv
+}
+
+// Semantic convention for attributes that describe messaging destination on
+// broker
+const (
+	// MessagingDestinationAnonymousKey is the attribute Key conforming to the
+	// "messaging.destination.anonymous" semantic conventions. It represents a
+	// boolean that is true if the message destination is anonymous (could be
+	// unnamed or have auto-generated name).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+	// MessagingDestinationNameKey is the attribute Key conforming to the
+	// "messaging.destination.name" semantic conventions. It represents the
+	// message destination name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: Destination name SHOULD uniquely identify a specific queue, topic
+	// or other entity within the broker. If
+	// the broker does not have such notion, the destination name SHOULD
+	// uniquely identify the broker.
+	MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+	// MessagingDestinationTemplateKey is the attribute Key conforming to the
+	// "messaging.destination.template" semantic conventions. It represents the
+	// low cardinality representation of the messaging destination name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/customers/{customerID}'
+	// Note: Destination names could be constructed from templates. An example
+	// would be a destination name involving a user name or product id.
+	// Although the destination name in this case is of high cardinality, the
+	// underlying template is of low cardinality and can be effectively used
+	// for grouping and aggregation.
+	MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+	// MessagingDestinationTemporaryKey is the attribute Key conforming to the
+	// "messaging.destination.temporary" semantic conventions. It represents a
+	// boolean that is true if the message destination is temporary and might
+	// not exist anymore after messages are processed.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+)
+
+// MessagingDestinationAnonymous returns an attribute KeyValue for the
+// "messaging.destination.anonymous" semantic convention: true if the
+// message destination is anonymous (unnamed or with an auto-generated
+// name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+	kv := MessagingDestinationAnonymousKey.Bool(val)
+	return kv
+}
+
+// MessagingDestinationName returns an attribute KeyValue for the
+// "messaging.destination.name" semantic convention: the message destination
+// name.
+func MessagingDestinationName(val string) attribute.KeyValue {
+	kv := MessagingDestinationNameKey.String(val)
+	return kv
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue for the
+// "messaging.destination.template" semantic convention: the low-cardinality
+// representation of the messaging destination name.
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+	kv := MessagingDestinationTemplateKey.String(val)
+	return kv
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue for the
+// "messaging.destination.temporary" semantic convention: true if the
+// message destination is temporary and might not exist anymore after
+// messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+	kv := MessagingDestinationTemporaryKey.Bool(val)
+	return kv
+}
+
+// Semantic convention for attributes that describe the publish messaging
+// destination on broker. The term Publish Destination refers to the
+// destination the message was originally published to. These attributes should
+// be used on the consumer side when information about the publish destination
+// is available and is different from the destination the messages are
+// consumed from.
+const (
+	// MessagingDestinationPublishAnonymousKey is the attribute Key conforming
+	// to the "messaging.destination_publish.anonymous" semantic conventions.
+	// It represents a boolean that is true if the publish message destination
+	// is anonymous (could be unnamed or have auto-generated name).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
+
+	// MessagingDestinationPublishNameKey is the attribute Key conforming to
+	// the "messaging.destination_publish.name" semantic conventions. It
+	// represents the name of the original destination the message was
+	// published to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MyQueue', 'MyTopic'
+	// Note: The name SHOULD uniquely identify a specific queue, topic, or
+	// other entity within the broker. If
+	// the broker does not have such notion, the original destination name
+	// SHOULD uniquely identify the broker.
+	MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
+)
+
+// MessagingDestinationPublishAnonymous returns an attribute KeyValue for
+// the "messaging.destination_publish.anonymous" semantic convention: true
+// if the publish message destination is anonymous (unnamed or with an
+// auto-generated name).
+func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
+	kv := MessagingDestinationPublishAnonymousKey.Bool(val)
+	return kv
+}
+
+// MessagingDestinationPublishName returns an attribute KeyValue for the
+// "messaging.destination_publish.name" semantic convention: the name of the
+// original destination the message was published to.
+func MessagingDestinationPublishName(val string) attribute.KeyValue {
+	kv := MessagingDestinationPublishNameKey.String(val)
+	return kv
+}
+
+// Attributes for RabbitMQ
+const (
+	// MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+	// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+	// conventions. It represents the RabbitMQ message routing key.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If not empty.)
+	// Stability: experimental
+	// Examples: 'myKey'
+	MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue for
+// the "messaging.rabbitmq.destination.routing_key" semantic convention: the
+// RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+	kv := MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+	return kv
+}
+
+// Attributes for Apache Kafka
+const (
+	// MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+	// "messaging.kafka.consumer.group" semantic conventions. It represents the
+	// name of the Kafka Consumer Group that is handling the message. Only
+	// applies to consumers, not producers.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+	// MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+	// the "messaging.kafka.destination.partition" semantic conventions. It
+	// represents the partition the message is sent to.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 2
+	MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+	// MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the
+	// message key; message keys in Kafka are used for grouping alike messages
+	// to ensure they're processed on the same partition. They differ from
+	// `messaging.message.id` in that they're not unique. If the key is `null`,
+	// the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to
+	// be supplied for the attribute. If the key has no unambiguous, canonical
+	// string form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+	// MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+	// "messaging.kafka.message.offset" semantic conventions. It represents the
+	// offset of a record in the corresponding Kafka partition.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 42
+	MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+	// MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+	// "messaging.kafka.message.tombstone" semantic conventions. It represents
+	// a boolean that is true if the message is a tombstone.
+	//
+	// Type: boolean
+	// RequirementLevel: ConditionallyRequired (If value is `true`. When
+	// missing, the value is assumed to be `false`.)
+	// Stability: experimental
+	MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue for the
+// "messaging.kafka.consumer.group" semantic convention: the name of the
+// Kafka Consumer Group handling the message. Only applies to consumers, not
+// producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+	kv := MessagingKafkaConsumerGroupKey.String(val)
+	return kv
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue for the
+// "messaging.kafka.destination.partition" semantic convention: the
+// partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+	kv := MessagingKafkaDestinationPartitionKey.Int(val)
+	return kv
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue for the
+// "messaging.kafka.message.key" semantic convention: the message key.
+// Kafka message keys are used for grouping alike messages to ensure they're
+// processed on the same partition; unlike `messaging.message.id` they're
+// not unique. If the key is `null`, the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+	kv := MessagingKafkaMessageKeyKey.String(val)
+	return kv
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue for the
+// "messaging.kafka.message.offset" semantic convention: the offset of a
+// record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+	kv := MessagingKafkaMessageOffsetKey.Int(val)
+	return kv
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue for the
+// "messaging.kafka.message.tombstone" semantic convention: true if the
+// message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+	kv := MessagingKafkaMessageTombstoneKey.Bool(val)
+	return kv
+}
+
+// Attributes for Apache RocketMQ
+const (
+	// MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.client_group" semantic conventions. It represents
+	// the name of the RocketMQ producer/consumer group that is handling the
+	// message. The client type is identified by the SpanKind.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'myConsumerGroup'
+	MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+	// MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+	// the "messaging.rocketmq.consumption_model" semantic conventions. It
+	// represents the model of message consumption. This only applies to
+	// consumer spans.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+	// MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+	// conventions. It represents the delay time level for delay message, which
+	// determines the message delay time.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the message type is delay
+	// and delivery timestamp is not specified.)
+	// Stability: experimental
+	// Examples: 3
+	MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+	// MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+	// conforming to the "messaging.rocketmq.message.delivery_timestamp"
+	// semantic conventions. It represents the timestamp in milliseconds that
+	// the delay message is expected to be delivered to consumer.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the message type is delay
+	// and delay time level is not specified.)
+	// Stability: experimental
+	// Examples: 1665987217045
+	MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+	// MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents
+	// the message group; it is essential for FIFO messages. Messages that
+	// belong to the same message group are always processed one by one within
+	// the same consumer group.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+	// Stability: experimental
+	// Examples: 'myMessageGroup'
+	MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+	// MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.keys" semantic conventions. It represents
+	// the key(s) of message, another way to mark message besides message id.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'keyA', 'keyB'
+	MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+	// MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+	// secondary classifier of message besides topic.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'tagA'
+	MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+	// MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.type" semantic conventions. It represents
+	// the type of message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+	// MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+	// "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources; resources in different namespaces are
+	// independent of each other.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'myNamespace'
+	MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+)
+
+var (
+	// Clustering consumption model
+	MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+	// Broadcasting consumption model
+	MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+	// Normal message
+	MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+	// FIFO message
+	MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+	// Delay message
+	MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+	// Transaction message
+	MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+	return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// that it is essential for FIFO messages. Messages that belong to the same
+// message group are always processed one by one within the same consumer
+// group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of message, another way to mark message besides message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+	return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+	return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources, resources in different namespaces are
+// individual.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+	return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// Attributes describing URL.
+const (
+	// URLFragmentKey is the attribute Key conforming to the "url.fragment"
+	// semantic conventions. It represents the [URI
+	// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'SemConv'
+	URLFragmentKey = attribute.Key("url.fragment")
+
+	// URLFullKey is the attribute Key conforming to the "url.full" semantic
+	// conventions. It represents the absolute URL describing a network
+	// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+	// '//localhost'
+	// Note: For network calls, URL usually has
+	// `scheme://host[:port][path][?query][#fragment]` format, where the
+	// fragment is not transmitted over HTTP, but if it is known, it should be
+	// included nevertheless.
+	// `url.full` MUST NOT contain credentials passed via URL in form of
+	// `https://username:password@www.example.com/`. In such case username and
+	// password should be redacted and attribute's value should be
+	// `https://REDACTED:REDACTED@www.example.com/`.
+	// `url.full` SHOULD capture the absolute URL when it is available (or can
+	// be reconstructed) and SHOULD NOT be validated or modified except for
+	// sanitizing purposes.
+	URLFullKey = attribute.Key("url.full")
+
+	// URLPathKey is the attribute Key conforming to the "url.path" semantic
+	// conventions. It represents the [URI
+	// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/search'
+	// Note: When missing, the value is assumed to be `/`
+	URLPathKey = attribute.Key("url.path")
+
+	// URLQueryKey is the attribute Key conforming to the "url.query" semantic
+	// conventions. It represents the [URI
+	// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'q=OpenTelemetry'
+	// Note: Sensitive content provided in query string SHOULD be scrubbed when
+	// instrumentations can identify it.
+	URLQueryKey = attribute.Key("url.query")
+
+	// URLSchemeKey is the attribute Key conforming to the "url.scheme"
+	// semantic conventions. It represents the [URI
+	// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+	// identifying the used protocol.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'https', 'ftp', 'telnet'
+	URLSchemeKey = attribute.Key("url.scheme")
+)
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+	return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+	return URLFullKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+	return URLPathKey.String(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+	return URLQueryKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+	return URLSchemeKey.String(val)
+}
+
+// Describes user-agent attributes.
+const (
+	// UserAgentOriginalKey is the attribute Key conforming to the
+	// "user_agent.original" semantic conventions. It represents the value of
+	// the [HTTP
+	// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+	// header sent by the client.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+	UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+	return UserAgentOriginalKey.String(val)
+}
diff --git a/exporters/otlp/otlptrace/internal/envconfig/doc.go b/semconv/v1.22.0/doc.go
similarity index 64%
rename from exporters/otlp/otlptrace/internal/envconfig/doc.go
rename to semconv/v1.22.0/doc.go
index f2887ee8ffb..6f91d345dc4 100644
--- a/exporters/otlp/otlptrace/internal/envconfig/doc.go
+++ b/semconv/v1.22.0/doc.go
@@ -12,9 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package envconfig contains common functionality for all OTLP trace exporters
-// to handle environment variable configuration.
+// Package semconv implements OpenTelemetry semantic conventions.
 //
-// Deprecated: package envconfig exists for historical compatibility, it should
-// not be used.
-package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig"
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.22.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.22.0"
diff --git a/semconv/v1.22.0/event.go b/semconv/v1.22.0/event.go
new file mode 100644
index 00000000000..68d76556fa8
--- /dev/null
+++ b/semconv/v1.22.0/event.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.22.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+	// FeatureFlagKeyKey is the attribute Key conforming to the
+	// "feature_flag.key" semantic conventions. It represents the unique
+	// identifier of the feature flag.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'logo-color'
+	FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+	// FeatureFlagProviderNameKey is the attribute Key conforming to the
+	// "feature_flag.provider_name" semantic conventions. It represents the
+	// name of the service provider that performs the flag evaluation.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'Flag Manager'
+	FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+	// FeatureFlagVariantKey is the attribute Key conforming to the
+	// "feature_flag.variant" semantic conventions. It represents the sHOULD be
+	// a semantic identifier for a value. If one is unavailable, a stringified
+	// version of the value can be used.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'red', 'true', 'on'
+	// Note: A semantic identifier, commonly referred to as a variant, provides
+	// a means
+	// for referring to a value without including the value itself. This can
+	// provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+	//
+	// A stringified version of the value can be used in situations where a
+	// semantic identifier is unavailable. String representation of the value
+	// should be determined by the implementer.
+	FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+	return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+	return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+	return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+	// MessageCompressedSizeKey is the attribute Key conforming to the
+	// "message.compressed_size" semantic conventions. It represents the
+	// compressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+	// MessageIDKey is the attribute Key conforming to the "message.id"
+	// semantic conventions. It represents a value that MUST be calculated as
+	// two different counters starting from `1`: one for sent messages and one
+	// for received messages.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: This way we guarantee that the values will be consistent between
+	// different implementations.
+	MessageIDKey = attribute.Key("message.id")
+
+	// MessageTypeKey is the attribute Key conforming to the "message.type"
+	// semantic conventions. It represents whether this is a received or
+	// sent message.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessageTypeKey = attribute.Key("message.type")
+
+	// MessageUncompressedSizeKey is the attribute Key conforming to the
+	// "message.uncompressed_size" semantic conventions. It represents the
+	// uncompressed size of the message in bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+	// sent
+	MessageTypeSent = MessageTypeKey.String("SENT")
+	// received
+	MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+	return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It represents a value that MUST be calculated as two
+// different counters starting from `1`: one for sent messages and one for
+// received messages.
+func MessageID(val int) attribute.KeyValue {
+	return MessageIDKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+	return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+	// ExceptionEscapedKey is the attribute Key conforming to the
+	// "exception.escaped" semantic conventions. It represents the sHOULD be
+	// set to true if the exception event is recorded at a point where it is
+	// known that the exception is escaping the scope of the span.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: An exception is considered to have escaped (or left) the scope of
+	// a span,
+	// if that span is ended while the exception is still logically "in
+	// flight".
+	// This may be actually "in flight" in some languages (e.g. if the
+	// exception
+	// is passed to a Context manager's `__exit__` method in Python) but will
+	// usually be caught at the point of recording the exception in most
+	// languages.
+	//
+	// It is usually not possible to determine at the point where an exception
+	// is thrown
+	// whether it will escape the scope of a span.
+	// However, it is trivial to know that an exception
+	// will escape, if one checks for an active exception just before ending
+	// the span,
+	// as done in the [example above](#recording-an-exception).
+	//
+	// It follows that an exception may still escape the scope of the span
+	// even if the `exception.escaped` attribute was not set or set to false,
+	// since the event might have been recorded at a time where it was not
+	// clear whether the exception will escape.
+	ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+	return ExceptionEscapedKey.Bool(val)
+}
diff --git a/exporters/otlp/otlptrace/internal/header_test.go b/semconv/v1.22.0/exception.go
similarity index 75%
rename from exporters/otlp/otlptrace/internal/header_test.go
rename to semconv/v1.22.0/exception.go
index d93340fc0d6..9f1013f0b69 100644
--- a/exporters/otlp/otlptrace/internal/header_test.go
+++ b/semconv/v1.22.0/exception.go
@@ -12,14 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package internal
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.22.0"
 
-import (
-	"testing"
-
-	"github.com/stretchr/testify/require"
+const (
+	// ExceptionEventName is the name of the Span event representing an exception.
+	ExceptionEventName = "exception"
 )
-
-func TestGetUserAgentHeader(t *testing.T) {
-	require.Regexp(t, "OTel OTLP Exporter Go/1\\..*", GetUserAgentHeader())
-}
diff --git a/semconv/v1.22.0/resource.go b/semconv/v1.22.0/resource.go
new file mode 100644
index 00000000000..65c1ca8c0a3
--- /dev/null
+++ b/semconv/v1.22.0/resource.go
@@ -0,0 +1,2568 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.22.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The Android platform on which the Android application is running.
+const (
+	// AndroidOSAPILevelKey is the attribute Key conforming to the
+	// "android.os.api_level" semantic conventions. It represents the uniquely
+	// identifies the framework API revision offered by a version
+	// (`os.version`) of the android operating system. More information can be
+	// found
+	// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '33', '32'
+	AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+	return AndroidOSAPILevelKey.String(val)
+}
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+	// BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+	// semantic conventions. It represents the array of brand name and version
+	// separated by a space
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.brands`).
+	BrowserBrandsKey = attribute.Key("browser.brands")
+
+	// BrowserLanguageKey is the attribute Key conforming to the
+	// "browser.language" semantic conventions. It represents the preferred
+	// language of the user using the browser
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'en', 'en-US', 'fr', 'fr-FR'
+	// Note: This value is intended to be taken from the Navigator API
+	// `navigator.language`.
+	BrowserLanguageKey = attribute.Key("browser.language")
+
+	// BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+	// semantic conventions. It represents a boolean that is true if the
+	// browser is running on a mobile device
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.mobile`). If unavailable, this attribute
+	// SHOULD be left unset.
+	BrowserMobileKey = attribute.Key("browser.mobile")
+
+	// BrowserPlatformKey is the attribute Key conforming to the
+	// "browser.platform" semantic conventions. It represents the platform on
+	// which the browser is running
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Windows', 'macOS', 'Android'
+	// Note: This value is intended to be taken from the [UA client hints
+	// API](https://wicg.github.io/ua-client-hints/#interface)
+	// (`navigator.userAgentData.platform`). If unavailable, the legacy
+	// `navigator.platform` API SHOULD NOT be used instead and this attribute
+	// SHOULD be left unset in order for the values to be consistent.
+	// The list of possible values is defined in the [W3C User-Agent Client
+	// Hints
+	// specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+	// Note that some (but not all) of these values can overlap with values in
+	// the [`os.type` and `os.name` attributes](./os.md). However, for
+	// consistency, the values in the `browser.platform` attribute should
+	// capture the exact value that the user agent provides.
+	BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+	return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+	return BrowserLanguageKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+	return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+	return BrowserPlatformKey.String(val)
+}
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+	// CloudAccountIDKey is the attribute Key conforming to the
+	// "cloud.account.id" semantic conventions. It represents the cloud account
+	// ID the resource is assigned to.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '111111111111', 'opentelemetry'
+	CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+	// CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the cloud
+	// regions often have multiple, isolated locations known as zones to
+	// increase availability. Availability zone represents the zone where the
+	// resource is running.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-east-1c'
+	// Note: Availability zones are called "zones" on Alibaba Cloud and Google
+	// Cloud.
+	CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+	// CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+	// semantic conventions. It represents the cloud platform in use.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The prefix of the service SHOULD match the one specified in
+	// `cloud.provider`.
+	CloudPlatformKey = attribute.Key("cloud.platform")
+
+	// CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+	// semantic conventions. It represents the name of the cloud provider.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	CloudProviderKey = attribute.Key("cloud.provider")
+
+	// CloudRegionKey is the attribute Key conforming to the "cloud.region"
+	// semantic conventions. It represents the geographical region the resource
+	// is running.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-central1', 'us-east-1'
+	// Note: Refer to your provider's docs to see the available regions, for
+	// example [Alibaba Cloud
+	// regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+	// regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+	// [Azure
+	// regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+	// [Google Cloud regions](https://cloud.google.com/about/locations), or
+	// [Tencent Cloud
+	// regions](https://www.tencentcloud.com/document/product/213/6091).
+	CloudRegionKey = attribute.Key("cloud.region")
+
+	// CloudResourceIDKey is the attribute Key conforming to the
+	// "cloud.resource_id" semantic conventions. It represents the cloud
+	// provider-specific native identifier of the monitored cloud resource
+	// (e.g. an
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// on AWS, a [fully qualified resource
+	// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+	// on Azure, a [full resource
+	// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+	// on GCP)
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+	// '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+	// '/subscriptions/<SUBSCIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
+	// Note: On some cloud providers, it may not be possible to determine the
+	// full ID at startup,
+	// so it may be necessary to set `cloud.resource_id` as a span attribute
+	// instead.
+	//
+	// The exact value to use for `cloud.resource_id` depends on the cloud
+	// provider.
+	// The following well-known definitions MUST be used if you set this
+	// attribute and they apply:
+	//
+	// * **AWS Lambda:** The function
+	// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+	//   Take care not to use the "invoked ARN" directly but replace any
+	//   [alias
+	// suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+	//   with the resolved function version, as the same runtime instance may
+	// be invokable with
+	//   multiple different aliases.
+	// * **GCP:** The [URI of the
+	// resource](https://cloud.google.com/iam/docs/full-resource-names)
+	// * **Azure:** The [Fully Qualified Resource
+	// ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+	// of the invoked function,
+	//   *not* the function app, having the form
+	// `/subscriptions/<SUBSCIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+	//   This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	//   a TracerProvider.
+	CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+var (
+	// Alibaba Cloud Elastic Compute Service
+	CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+	// Alibaba Cloud Function Compute
+	CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+	// Red Hat OpenShift on Alibaba Cloud
+	CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+	// AWS Elastic Compute Cloud
+	CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+	// AWS Elastic Container Service
+	CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+	// AWS Elastic Kubernetes Service
+	CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+	// AWS Lambda
+	CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+	// AWS Elastic Beanstalk
+	CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+	// AWS App Runner
+	CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+	// Red Hat OpenShift on AWS (ROSA)
+	CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+	// Azure Virtual Machines
+	CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+	// Azure Container Instances
+	CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+	// Azure Kubernetes Service
+	CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+	// Azure Functions
+	CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+	// Azure App Service
+	CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+	// Azure Red Hat OpenShift
+	CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+	// Google Bare Metal Solution (BMS)
+	CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+	// Google Cloud Compute Engine (GCE)
+	CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+	// Google Cloud Run
+	CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+	// Google Cloud Kubernetes Engine (GKE)
+	CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+	// Google Cloud Functions (GCF)
+	CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+	// Google Cloud App Engine (GAE)
+	CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+	// Red Hat OpenShift on Google Cloud
+	CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+	// Red Hat OpenShift on IBM Cloud
+	CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+	// Tencent Cloud Cloud Virtual Machine (CVM)
+	CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+	// Tencent Cloud Elastic Kubernetes Service (EKS)
+	CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+	// Tencent Cloud Serverless Cloud Function (SCF)
+	CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+var (
+	// Alibaba Cloud
+	CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+	// Amazon Web Services
+	CloudProviderAWS = CloudProviderKey.String("aws")
+	// Microsoft Azure
+	CloudProviderAzure = CloudProviderKey.String("azure")
+	// Google Cloud Platform
+	CloudProviderGCP = CloudProviderKey.String("gcp")
+	// Heroku Platform as a Service
+	CloudProviderHeroku = CloudProviderKey.String("heroku")
+	// IBM Cloud
+	CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+	// Tencent Cloud
+	CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+	return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+	return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+	return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+// on Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+	return CloudResourceIDKey.String(val)
+}
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+	// AWSECSClusterARNKey is the attribute Key conforming to the
+	// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+	// [ECS
+	// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+	// AWSECSContainerARNKey is the attribute Key conforming to the
+	// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+	// Resource Name (ARN) of an [ECS container
+	// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+	AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+	// AWSECSLaunchtypeKey is the attribute Key conforming to the
+	// "aws.ecs.launchtype" semantic conventions. It represents the [launch
+	// type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+	// for an ECS task.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+	// AWSECSTaskARNKey is the attribute Key conforming to the
+	// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+	// [ECS task
+	// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+	// AWSECSTaskFamilyKey is the attribute Key conforming to the
+	// "aws.ecs.task.family" semantic conventions. It represents the task
+	// definition family this task definition is a member of.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-family'
+	AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+	// AWSECSTaskRevisionKey is the attribute Key conforming to the
+	// "aws.ecs.task.revision" semantic conventions. It represents the revision
+	// for this task definition.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '8', '26'
+	AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+	// ec2
+	AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+	// fargate
+	AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+	return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+	return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+	return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+	return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+	return AWSECSTaskRevisionKey.String(val)
+}
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+	// AWSEKSClusterARNKey is the attribute Key conforming to the
+	// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+	// EKS cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+	AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+	return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+	// AWSLogGroupARNsKey is the attribute Key conforming to the
+	// "aws.log.group.arns" semantic conventions. It represents the Amazon
+	// Resource Name(s) (ARN) of the AWS log group(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+	// Note: See the [log group ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+	AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+	// AWSLogGroupNamesKey is the attribute Key conforming to the
+	// "aws.log.group.names" semantic conventions. It represents the name(s) of
+	// the AWS log group(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+	// Note: Multiple log groups must be supported for cases like
+	// multi-container applications, where a single application has sidecar
+	// containers, and each write to their own log group.
+	AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+	// AWSLogStreamARNsKey is the attribute Key conforming to the
+	// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+	// the AWS log stream(s).
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	// Note: See the [log stream ARN format
+	// documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+	// One log group can contain several log streams, so these ARNs necessarily
+	// identify both a log group and a log stream.
+	AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+	// AWSLogStreamNamesKey is the attribute Key conforming to the
+	// "aws.log.stream.names" semantic conventions. It represents the name(s)
+	// of the AWS log stream(s) an application is writing to.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+	AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+)
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+	return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+	return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+	return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+	return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// Resource used by Google Cloud Run.
+const (
+	// GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+	// "gcp.cloud_run.job.execution" semantic conventions. It represents the
+	// name of the Cloud Run
+	// [execution](https://cloud.google.com/run/docs/managing/job-executions)
+	// being run for the Job, as set by the
+	// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+	// environment variable.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'job-name-xxxx', 'sample-job-mdw84'
+	GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+	// GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+	// "gcp.cloud_run.job.task_index" semantic conventions. It represents the
+	// index for a task within an execution as provided by the
+	// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+	// environment variable.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 1
+	GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+)
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
+// of the Cloud Run
+// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
+// run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+	return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the
+// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+	return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
+// Resources used by Google Compute Engine (GCE).
+const (
+	// GCPGceInstanceHostnameKey is the attribute Key conforming to the
+	// "gcp.gce.instance.hostname" semantic conventions. It represents the
+	// hostname of a GCE instance. This is the full value of the default or
+	// [custom
+	// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-host1234.example.com',
+	// 'sample-vm.us-west1-b.c.my-project.internal'
+	GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+	// GCPGceInstanceNameKey is the attribute Key conforming to the
+	// "gcp.gce.instance.name" semantic conventions. It represents the instance
+	// name of a GCE instance. This is the value provided by `host.name`, the
+	// visible name of the instance in the Cloud Console UI, and the prefix for
+	// the default hostname of the instance as defined by the [default internal
+	// DNS
+	// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'instance-1', 'my-vm-name'
+	GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or [custom
+// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+func GCPGceInstanceHostname(val string) attribute.KeyValue {
+	return GCPGceInstanceHostnameKey.String(val)
+}
+
+// GCPGceInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance
+// name of a GCE instance. This is the value provided by `host.name`, the
+// visible name of the instance in the Cloud Console UI, and the prefix for the
+// default hostname of the instance as defined by the [default internal DNS
+// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func GCPGceInstanceName(val string) attribute.KeyValue {
+	return GCPGceInstanceNameKey.String(val)
+}
+
+// Heroku dyno metadata
+const (
+	// HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+	// semantic conventions. It represents the unique identifier for the
+	// application
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+	HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+	// HerokuReleaseCommitKey is the attribute Key conforming to the
+	// "heroku.release.commit" semantic conventions. It represents the commit
+	// hash for the current release
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+	HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+	// HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+	// "heroku.release.creation_timestamp" semantic conventions. It represents
+	// the time and date the release was created
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2022-10-23T18:00:42Z'
+	HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application
+func HerokuAppID(val string) attribute.KeyValue {
+	return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+	return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+	return HerokuReleaseCreationTimestampKey.String(val)
+}
+
+// A container instance.
+const (
+	// ContainerCommandKey is the attribute Key conforming to the
+	// "container.command" semantic conventions. It represents the command used
+	// to run the container (i.e. the command name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol'
+	// Note: If using embedded credentials or sensitive data, it is recommended
+	// to remove them to prevent potential leakage.
+	ContainerCommandKey = attribute.Key("container.command")
+
+	// ContainerCommandArgsKey is the attribute Key conforming to the
+	// "container.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) run by the
+	// container. [2]
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol, --config, config.yaml'
+	ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+	// ContainerCommandLineKey is the attribute Key conforming to the
+	// "container.command_line" semantic conventions. It represents the full
+	// command run by the container as a single string representing the full
+	// command. [2]
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'otelcontribcol --config config.yaml'
+	ContainerCommandLineKey = attribute.Key("container.command_line")
+
+	// ContainerIDKey is the attribute Key conforming to the "container.id"
+	// semantic conventions. It represents the container ID. Usually a UUID, as
+	// for example used to [identify Docker
+	// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+	// The UUID might be abbreviated.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'a3bf90e006b2'
+	ContainerIDKey = attribute.Key("container.id")
+
+	// ContainerImageIDKey is the attribute Key conforming to the
+	// "container.image.id" semantic conventions. It represents the runtime
+	// specific image identifier. Usually a hash algorithm followed by a UUID.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+	// Note: Docker defines a sha256 of the image id; `container.image.id`
+	// corresponds to the `Image` field from the Docker container inspect
+	// [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+	// endpoint.
+	// K8S defines a link to the container registry repository with digest
+	// `"imageID": "registry.azurecr.io
+	// /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+	// The ID is assigned by the container runtime and can vary in different
+	// environments. Consider using `oci.manifest.digest` if it is important to
+	// identify the same image in different environments/runtimes.
+	ContainerImageIDKey = attribute.Key("container.image.id")
+
+	// ContainerImageNameKey is the attribute Key conforming to the
+	// "container.image.name" semantic conventions. It represents the name of
+	// the image the container was built on.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'gcr.io/opentelemetry/operator'
+	ContainerImageNameKey = attribute.Key("container.image.name")
+
+	// ContainerImageRepoDigestsKey is the attribute Key conforming to the
+	// "container.image.repo_digests" semantic conventions. It represents the
+	// repo digests of the container image as provided by the container
+	// runtime.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+	// 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+	// Note:
+	// [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+	// and
+	// [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+	// report those under the `RepoDigests` field.
+	ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+	// ContainerImageTagsKey is the attribute Key conforming to the
+	// "container.image.tags" semantic conventions. It represents the container
+	// image tags. An example can be found in [Docker Image
+	// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+	// Should be only the `<tag>` section of the full name for example from
+	// `registry.example.com/my-org/my-image:<tag>`.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'v1.27.1', '3.5.7-0'
+	ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+	// ContainerNameKey is the attribute Key conforming to the "container.name"
+	// semantic conventions. It represents the container name used by container
+	// runtime.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-autoconf'
+	ContainerNameKey = attribute.Key("container.name")
+
+	// ContainerRuntimeKey is the attribute Key conforming to the
+	// "container.runtime" semantic conventions. It represents the container
+	// runtime managing this container.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'docker', 'containerd', 'rkt'
+	ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+	return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+	return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string representing the full
+// command. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+	return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+	return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+	return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+	return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+	return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+	return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+	return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+	return ContainerRuntimeKey.String(val)
+}
+
+// The software deployment.
+const (
+	// DeploymentEnvironmentKey is the attribute Key conforming to the
+	// "deployment.environment" semantic conventions. It represents the name of
+	// the [deployment
+	// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+	// deployment tier).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'staging', 'production'
+	DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment
+// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+// deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+	return DeploymentEnvironmentKey.String(val)
+}
+
+// The device on which the process represented by this resource is running.
+const (
+	// DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+	// conventions. It represents a unique identifier representing the device
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+	// Note: The device identifier MUST only be defined using the values
+	// outlined below. This value is not an advertising identifier and MUST NOT
+	// be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+	// to the [vendor
+	// identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+	// On Android (Java or Kotlin), this value MUST be equal to the Firebase
+	// Installation ID or a globally unique UUID which is persisted across
+	// sessions in your application. More information can be found
+	// [here](https://developer.android.com/training/articles/user-data-ids) on
+	// best practices and exact implementation details. Caution should be taken
+	// when storing personal data or anything which can identify a user. GDPR
+	// and data protection laws may apply, ensure you do your own due
+	// diligence.
+	DeviceIDKey = attribute.Key("device.id")
+
+	// DeviceManufacturerKey is the attribute Key conforming to the
+	// "device.manufacturer" semantic conventions. It represents the name of
+	// the device manufacturer
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Apple', 'Samsung'
+	// Note: The Android OS provides this field via
+	// [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+	// iOS apps SHOULD hardcode the value `Apple`.
+	DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+	// DeviceModelIdentifierKey is the attribute Key conforming to the
+	// "device.model.identifier" semantic conventions. It represents the model
+	// identifier for the device
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'iPhone3,4', 'SM-G920F'
+	// Note: It's recommended this value represents a machine readable version
+	// of the model identifier rather than the market or consumer-friendly name
+	// of the device.
+	DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+	// DeviceModelNameKey is the attribute Key conforming to the
+	// "device.model.name" semantic conventions. It represents the marketing
+	// name for the device model
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+	// Note: It's recommended this value represents a human readable version of
+	// the device model rather than a machine readable alternative.
+	DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device.
+func DeviceID(val string) attribute.KeyValue {
+	return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer.
+func DeviceManufacturer(val string) attribute.KeyValue {
+	return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device.
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+	return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model.
+func DeviceModelName(val string) attribute.KeyValue {
+	return DeviceModelNameKey.String(val)
+}
+
+// A serverless instance.
+const (
+	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+	// semantic conventions. It represents the execution environment ID as a
+	// string, that will be potentially reused for other invocations to the
+	// same function/function version.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+	// Note: * **AWS Lambda:** Use the (full) log stream name.
+	FaaSInstanceKey = attribute.Key("faas.instance")
+
+	// FaaSMaxMemoryKey is the attribute Key conforming to the
+	// "faas.max_memory" semantic conventions. It represents the amount of
+	// memory available to the serverless function converted to Bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 134217728
+	// Note: It's recommended to set this attribute since e.g. too little
+	// memory can easily stop a Java AWS Lambda function from working
+	// correctly. On AWS Lambda, the environment variable
+	// `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+	// be multiplied by 1,048,576).
+	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+	// conventions. It represents the name of the single function that this
+	// runtime instance executes.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+	// Note: This is the name of the function as configured/deployed on the
+	// FaaS
+	// platform and is usually different from the name of the callback
+	// function (which may be stored in the
+	// [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
+	// span attributes).
+	//
+	// For some cloud providers, the above definition is ambiguous. The
+	// following
+	// definition of function name MUST be used for this attribute
+	// (and consequently the span name) for the listed cloud
+	// providers/products:
+	//
+	// * **Azure:**  The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+	//   followed by a forward slash followed by the function name (this form
+	//   can also be seen in the resource JSON for the function).
+	//   This means that a span attribute MUST be used, as an Azure function
+	//   app can host multiple functions that would usually share
+	//   a TracerProvider (see also the `cloud.resource_id` attribute).
+	FaaSNameKey = attribute.Key("faas.name")
+
+	// FaaSVersionKey is the attribute Key conforming to the "faas.version"
+	// semantic conventions. It represents the immutable version of the
+	// function being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '26', 'pinkfroid-00002'
+	// Note: Depending on the cloud provider and platform, use:
+	//
+	// * **AWS Lambda:** The [function
+	// version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+	//   (an integer represented as a decimal string).
+	// * **Google Cloud Run (Services):** The
+	// [revision](https://cloud.google.com/run/docs/managing/revisions)
+	//   (i.e., the function name plus the revision suffix).
+	// * **Google Cloud Functions:** The value of the
+	//   [`K_REVISION` environment
+	// variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+	// * **Azure Functions:** Not applicable. Do not set this attribute.
+	FaaSVersionKey = attribute.Key("faas.version")
+)
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+	return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+	return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+	return FaaSNameKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+	return FaaSVersionKey.String(val)
+}
+
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches or disk array.
+const (
+	// HostArchKey is the attribute Key conforming to the "host.arch" semantic
+	// conventions. It represents the CPU architecture the host system is
+	// running on.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	HostArchKey = attribute.Key("host.arch")
+
+	// HostIDKey is the attribute Key conforming to the "host.id" semantic
+	// conventions. It represents the unique host ID. For Cloud, this must be
+	// the instance_id assigned by the cloud provider. For non-containerized
+	// systems, this should be the `machine-id`. See the table below for the
+	// sources to use to determine the `machine-id` based on operating system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+	HostIDKey = attribute.Key("host.id")
+
+	// HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID or host OS image ID.
+	// For Cloud, this value is from the provider.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ami-07b06b442921831e5'
+	HostImageIDKey = attribute.Key("host.image.id")
+
+	// HostImageNameKey is the attribute Key conforming to the
+	// "host.image.name" semantic conventions. It represents the name of the VM
+	// image or OS install the host was instantiated from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+	HostImageNameKey = attribute.Key("host.image.name")
+
+	// HostImageVersionKey is the attribute Key conforming to the
+	// "host.image.version" semantic conventions. It represents the version
+	// string of the VM image or host OS as defined in [Version
+	// Attributes](README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0.1'
+	HostImageVersionKey = attribute.Key("host.image.version")
+
+	// HostIPKey is the attribute Key conforming to the "host.ip" semantic
+	// conventions. It represents the available IP addresses of the host,
+	// excluding loopback interfaces.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
+	// Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
+	// addresses MUST be specified in the [RFC
+	// 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
+	HostIPKey = attribute.Key("host.ip")
+
+	// HostNameKey is the attribute Key conforming to the "host.name" semantic
+	// conventions. It represents the name of the host. On Unix systems, it may
+	// contain what the hostname command returns, or the fully qualified
+	// hostname, or another name specified by the user.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-test'
+	HostNameKey = attribute.Key("host.name")
+
+	// HostTypeKey is the attribute Key conforming to the "host.type" semantic
+	// conventions. It represents the type of host. For Cloud, this must be the
+	// machine type.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'n1-standard-1'
+	HostTypeKey = attribute.Key("host.type")
+)
+
+var (
+	// AMD64
+	HostArchAMD64 = HostArchKey.String("amd64")
+	// ARM32
+	HostArchARM32 = HostArchKey.String("arm32")
+	// ARM64
+	HostArchARM64 = HostArchKey.String("arm64")
+	// Itanium
+	HostArchIA64 = HostArchKey.String("ia64")
+	// 32-bit PowerPC
+	HostArchPPC32 = HostArchKey.String("ppc32")
+	// 64-bit PowerPC
+	HostArchPPC64 = HostArchKey.String("ppc64")
+	// IBM z/Architecture
+	HostArchS390x = HostArchKey.String("s390x")
+	// 32-bit x86
+	HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+	return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+	return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+	return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+	return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+	return HostIPKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+	return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+	return HostTypeKey.String(val)
+}
+
+// A host's CPU information
+const (
+	// HostCPUCacheL2SizeKey is the attribute Key conforming to the
+	// "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+	// of level 2 memory cache available to the processor (in Bytes).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 12288000
+	HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+	// HostCPUFamilyKey is the attribute Key conforming to the
+	// "host.cpu.family" semantic conventions. It represents the numeric value
+	// specifying the family or generation of the CPU.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 6
+	HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+	// HostCPUModelIDKey is the attribute Key conforming to the
+	// "host.cpu.model.id" semantic conventions. It represents the model
+	// identifier. It provides more granular information about the CPU,
+	// distinguishing it from other CPUs within the same family.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 6
+	HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+	// HostCPUModelNameKey is the attribute Key conforming to the
+	// "host.cpu.model.name" semantic conventions. It represents the model
+	// designation of the processor.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+	HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+	// HostCPUSteppingKey is the attribute Key conforming to the
+	// "host.cpu.stepping" semantic conventions. It represents the stepping or
+	// core revisions.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1
+	HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+	// HostCPUVendorIDKey is the attribute Key conforming to the
+	// "host.cpu.vendor.id" semantic conventions. It represents the processor
+	// manufacturer identifier. A maximum 12-character string.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'GenuineIntel'
+	// Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
+	// ID string in EBX, EDX and ECX registers. Writing these to memory in this
+	// order results in a 12-character string.
+	HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+)
+
+// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
+// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+// level 2 memory cache available to the processor (in Bytes).
+func HostCPUCacheL2Size(val int) attribute.KeyValue {
+	return HostCPUCacheL2SizeKey.Int(val)
+}
+
+// HostCPUFamily returns an attribute KeyValue conforming to the
+// "host.cpu.family" semantic conventions. It represents the numeric value
+// specifying the family or generation of the CPU.
+func HostCPUFamily(val int) attribute.KeyValue {
+	return HostCPUFamilyKey.Int(val)
+}
+
+// HostCPUModelID returns an attribute KeyValue conforming to the
+// "host.cpu.model.id" semantic conventions. It represents the model
+// identifier. It provides more granular information about the CPU,
+// distinguishing it from other CPUs within the same family.
+func HostCPUModelID(val int) attribute.KeyValue {
+	return HostCPUModelIDKey.Int(val)
+}
+
+// HostCPUModelName returns an attribute KeyValue conforming to the
+// "host.cpu.model.name" semantic conventions. It represents the model
+// designation of the processor.
+func HostCPUModelName(val string) attribute.KeyValue {
+	return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val int) attribute.KeyValue {
+	return HostCPUSteppingKey.Int(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+	return HostCPUVendorIDKey.String(val)
+}
+
+// A Kubernetes Cluster.
+const (
+	// K8SClusterNameKey is the attribute Key conforming to the
+	// "k8s.cluster.name" semantic conventions. It represents the name of the
+	// cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-cluster'
+	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+	// K8SClusterUIDKey is the attribute Key conforming to the
+	// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+	// the cluster, set to the UID of the `kube-system` namespace.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+	// Note: K8S does not have support for obtaining a cluster ID. If this is
+	// ever
+	// added, we will recommend collecting the `k8s.cluster.uid` through the
+	// official APIs. In the meantime, we are able to use the `uid` of the
+	// `kube-system` namespace as a proxy for cluster ID. Read on for the
+	// rationale.
+	//
+	// Every object created in a K8S cluster is assigned a distinct UID. The
+	// `kube-system` namespace is used by Kubernetes itself and will exist
+	// for the lifetime of the cluster. Using the `uid` of the `kube-system`
+	// namespace is a reasonable proxy for the K8S ClusterID as it will only
+	// change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+	// UUIDs as standardized by
+	// [ISO/IEC 9834-8 and ITU-T
+	// X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
+	// Which states:
+	//
+	// > If generated according to one of the mechanisms defined in Rec.
+	//   ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+	//   different from all other UUIDs generated before 3603 A.D., or is
+	//   extremely likely to be different (depending on the mechanism chosen).
+	//
+	// Therefore, UIDs between clusters should be extremely unlikely to
+	// conflict.
+	K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+	return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+	return K8SClusterUIDKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+	// K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+	// semantic conventions. It represents the name of the Node.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'node-1'
+	K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+	// K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+	// semantic conventions. It represents the UID of the Node.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+	K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+	return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+	return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+	// K8SNamespaceNameKey is the attribute Key conforming to the
+	// "k8s.namespace.name" semantic conventions. It represents the name of the
+	// namespace that the pod is running in.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'default'
+	K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+	return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+	// K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+	// semantic conventions. It represents the name of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry-pod-autoconf'
+	K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+	// K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+	// semantic conventions. It represents the UID of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+)
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+	return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+	return K8SPodUIDKey.String(val)
+}
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+	// K8SContainerNameKey is the attribute Key conforming to the
+	// "k8s.container.name" semantic conventions. It represents the name of the
+	// Container from Pod specification, must be unique within a Pod. Container
+	// runtime usually uses different globally unique name (`container.name`).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'redis'
+	K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+	// K8SContainerRestartCountKey is the attribute Key conforming to the
+	// "k8s.container.restart_count" semantic conventions. It represents the
+	// number of times the container was restarted. This attribute can be used
+	// to identify a particular container (running or stopped) within a
+	// container spec.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 2
+	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from Pod specification, must be unique within a Pod. Container
+// runtime usually uses different globally unique name (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+	return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+	return K8SContainerRestartCountKey.Int(val)
+}
+
+// A Kubernetes ReplicaSet object.
+const (
+	// K8SReplicaSetNameKey is the attribute Key conforming to the
+	// "k8s.replicaset.name" semantic conventions. It represents the name of
+	// the ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+	// K8SReplicaSetUIDKey is the attribute Key conforming to the
+	// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+	// ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+)
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+	return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+	return K8SReplicaSetUIDKey.String(val)
+}
+
+// A Kubernetes Deployment object.
+const (
+	// K8SDeploymentNameKey is the attribute Key conforming to the
+	// "k8s.deployment.name" semantic conventions. It represents the name of
+	// the Deployment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+	// K8SDeploymentUIDKey is the attribute Key conforming to the
+	// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+	// Deployment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+)
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+	return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+	return K8SDeploymentUIDKey.String(val)
+}
+
+// A Kubernetes StatefulSet object.
+const (
+	// K8SStatefulSetNameKey is the attribute Key conforming to the
+	// "k8s.statefulset.name" semantic conventions. It represents the name of
+	// the StatefulSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+	// K8SStatefulSetUIDKey is the attribute Key conforming to the
+	// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+	// StatefulSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+)
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+	return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+	return K8SStatefulSetUIDKey.String(val)
+}
+
+// A Kubernetes DaemonSet object.
+const (
+	// K8SDaemonSetNameKey is the attribute Key conforming to the
+	// "k8s.daemonset.name" semantic conventions. It represents the name of the
+	// DaemonSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+	// K8SDaemonSetUIDKey is the attribute Key conforming to the
+	// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+	// DaemonSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+)
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+	return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+	return K8SDaemonSetUIDKey.String(val)
+}
+
+// A Kubernetes Job object.
+const (
+	// K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+	// semantic conventions. It represents the name of the Job.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SJobNameKey = attribute.Key("k8s.job.name")
+
+	// K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+	// semantic conventions. It represents the UID of the Job.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SJobUIDKey = attribute.Key("k8s.job.uid")
+)
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+	return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+	return K8SJobUIDKey.String(val)
+}
+
+// A Kubernetes CronJob object.
+const (
+	// K8SCronJobNameKey is the attribute Key conforming to the
+	// "k8s.cronjob.name" semantic conventions. It represents the name of the
+	// CronJob.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+	// K8SCronJobUIDKey is the attribute Key conforming to the
+	// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+	// CronJob.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+)
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+	return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+	return K8SCronJobUIDKey.String(val)
+}
+
+// An OCI image manifest.
+const (
+	// OciManifestDigestKey is the attribute Key conforming to the
+	// "oci.manifest.digest" semantic conventions. It represents the digest of
+	// the OCI image manifest. For container images, this is the digest by
+	// which the container image is known.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+	// Note: Follows [OCI Image Manifest
+	// Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+	// and specifically the [Digest
+	// property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+	// An example can be found in [Example Image
+	// Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+	OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, it is the digest by
+// which the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+	return OciManifestDigestKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+	// OSBuildIDKey is the attribute Key conforming to the "os.build_id"
+	// semantic conventions. It represents the unique identifier for a
+	// particular build or compilation of the operating system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+	OSBuildIDKey = attribute.Key("os.build_id")
+
+	// OSDescriptionKey is the attribute Key conforming to the "os.description"
+	// semantic conventions. It represents the human readable (not intended to
+	// be parsed) OS version information, like e.g. reported by `ver` or
+	// `lsb_release -a` commands.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+	// LTS'
+	OSDescriptionKey = attribute.Key("os.description")
+
+	// OSNameKey is the attribute Key conforming to the "os.name" semantic
+	// conventions. It represents the human readable operating system name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'iOS', 'Android', 'Ubuntu'
+	OSNameKey = attribute.Key("os.name")
+
+	// OSTypeKey is the attribute Key conforming to the "os.type" semantic
+	// conventions. It represents the operating system type.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	OSTypeKey = attribute.Key("os.type")
+
+	// OSVersionKey is the attribute Key conforming to the "os.version"
+	// semantic conventions. It represents the version string of the operating
+	// system as defined in [Version
+	// Attributes](/docs/resource/README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '14.2.1', '18.04.1'
+	OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+	// Microsoft Windows
+	OSTypeWindows = OSTypeKey.String("windows")
+	// Linux
+	OSTypeLinux = OSTypeKey.String("linux")
+	// Apple Darwin
+	OSTypeDarwin = OSTypeKey.String("darwin")
+	// FreeBSD
+	OSTypeFreeBSD = OSTypeKey.String("freebsd")
+	// NetBSD
+	OSTypeNetBSD = OSTypeKey.String("netbsd")
+	// OpenBSD
+	OSTypeOpenBSD = OSTypeKey.String("openbsd")
+	// DragonFly BSD
+	OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+	// HP-UX (Hewlett Packard Unix)
+	OSTypeHPUX = OSTypeKey.String("hpux")
+	// AIX (Advanced Interactive eXecutive)
+	OSTypeAIX = OSTypeKey.String("aix")
+	// SunOS, Oracle Solaris
+	OSTypeSolaris = OSTypeKey.String("solaris")
+	// IBM z/OS
+	OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+	return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, like e.g. reported by `ver`
+// or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+	return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+	return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+	return OSVersionKey.String(val)
+}
+
+// An operating system process.
+const (
+	// ProcessCommandKey is the attribute Key conforming to the
+	// "process.command" semantic conventions. It represents the command used
+	// to launch the process (i.e. the command name). On Linux based systems,
+	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+	// be set to the first parameter extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: experimental
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
+
+	// ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) as received
+	// by the process. On Linux-based systems (and some other Unixoid systems
+	// supporting procfs), can be set according to the list of null-delimited
+	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+	// this would be the full argv vector passed to `main`.
+	//
+	// Type: string[]
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: experimental
+	// Examples: 'cmd/otecol', '--config=config.yaml'
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+	// ProcessCommandLineKey is the attribute Key conforming to the
+	// "process.command_line" semantic conventions. It represents the full
+	// command used to launch the process as a single string representing the
+	// full command. On Windows, can be set to the result of `GetCommandLineW`.
+	// Do not set this if you have to assemble it just for monitoring; use
+	// `process.command_args` instead.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: experimental
+	// Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
+	ProcessCommandLineKey = attribute.Key("process.command_line")
+
+	// ProcessExecutableNameKey is the attribute Key conforming to the
+	// "process.executable.name" semantic conventions. It represents the name
+	// of the process executable. On Linux based systems, can be set to the
+	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+	// of `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: experimental
+	// Examples: 'otelcol'
+	ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+	// ProcessExecutablePathKey is the attribute Key conforming to the
+	// "process.executable.path" semantic conventions. It represents the full
+	// path to the process executable. On Linux based systems, can be set to
+	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: experimental
+	// Examples: '/usr/bin/cmd/otelcol'
+	ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+	// semantic conventions. It represents the username of the user that owns
+	// the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'root'
+	ProcessOwnerKey = attribute.Key("process.owner")
+
+	// ProcessParentPIDKey is the attribute Key conforming to the
+	// "process.parent_pid" semantic conventions. It represents the parent
+	// Process identifier (PID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 111
+	ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+	// ProcessPIDKey is the attribute Key conforming to the "process.pid"
+	// semantic conventions. It represents the process identifier (PID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1234
+	ProcessPIDKey = attribute.Key("process.pid")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+	return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+	return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+	return ProcessCommandLineKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+	return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+	return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+	return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+	return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+	return ProcessPIDKey.Int(val)
+}
+
+// The single (language) runtime instance which is monitored.
+const (
+	// ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+	// "process.runtime.description" semantic conventions. It represents an
+	// additional description about the runtime of the process, for example a
+	// specific vendor customization of the runtime environment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+	ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+	// ProcessRuntimeNameKey is the attribute Key conforming to the
+	// "process.runtime.name" semantic conventions. It represents the name of
+	// the runtime of this process. For compiled native binaries, this SHOULD
+	// be the name of the compiler.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'OpenJDK Runtime Environment'
+	ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+	// ProcessRuntimeVersionKey is the attribute Key conforming to the
+	// "process.runtime.version" semantic conventions. It represents the
+	// version of the runtime of this process, as returned by the runtime
+	// without modification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '14.0.2'
+	ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+)
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+	return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+	return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+	return ProcessRuntimeVersionKey.String(val)
+}
+
+// A service instance.
+const (
+	// ServiceNameKey is the attribute Key conforming to the "service.name"
+	// semantic conventions. It represents the logical name of the service.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'shoppingcart'
+	// Note: MUST be the same for all instances of horizontally scaled
+	// services. If the value was not specified, SDKs MUST fallback to
+	// `unknown_service:` concatenated with
+	// [`process.executable.name`](process.md#process), e.g.
+	// `unknown_service:bash`. If `process.executable.name` is not available,
+	// the value MUST be set to `unknown_service`.
+	ServiceNameKey = attribute.Key("service.name")
+
+	// ServiceVersionKey is the attribute Key conforming to the
+	// "service.version" semantic conventions. It represents the version string
+	// of the service API or implementation. The format is not defined by these
+	// conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2.0.0', 'a01dbef8a'
+	ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+	return ServiceNameKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+	return ServiceVersionKey.String(val)
+}
+
+// A service instance.
+const (
+	// ServiceInstanceIDKey is the attribute Key conforming to the
+	// "service.instance.id" semantic conventions. It represents the string ID
+	// of the service instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'my-k8s-pod-deployment-1',
+	// '627cc493-f310-47de-96bd-71410b7dec09'
+	// Note: MUST be unique for each instance of the same
+	// `service.namespace,service.name` pair (in other words
+	// `service.namespace,service.name,service.instance.id` triplet MUST be
+	// globally unique). The ID helps to distinguish instances of the same
+	// service that exist at the same time (e.g. instances of a horizontally
+	// scaled service). It is preferable for the ID to be persistent and stay
+	// the same for the lifetime of the service instance, however it is
+	// acceptable that the ID is ephemeral and changes during important
+	// lifetime events for the service (e.g. service restarts). If the service
+	// has no inherent unique ID that can be used as the value of this
+	// attribute it is recommended to generate a random Version 1 or Version 4
+	// RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+	// Version 5, see RFC 4122 for more recommendations).
+	ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+	// ServiceNamespaceKey is the attribute Key conforming to the
+	// "service.namespace" semantic conventions. It represents a namespace for
+	// `service.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Shop'
+	// Note: A string value having a meaning that helps to distinguish a group
+	// of services, for example the team name that owns a group of services.
+	// `service.name` is expected to be unique within the same namespace. If
+	// `service.namespace` is not specified in the Resource then `service.name`
+	// is expected to be unique for all services that have no explicit
+	// namespace defined (so the empty/unspecified namespace is simply one more
+	// valid namespace). Zero-length namespace string is assumed equal to
+	// unspecified namespace.
+	ServiceNamespaceKey = attribute.Key("service.namespace")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+	return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+	return ServiceNamespaceKey.String(val)
+}
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+	// TelemetrySDKLanguageKey is the attribute Key conforming to the
+	// "telemetry.sdk.language" semantic conventions. It represents the
+	// language of the telemetry SDK.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+	// TelemetrySDKNameKey is the attribute Key conforming to the
+	// "telemetry.sdk.name" semantic conventions. It represents the name of the
+	// telemetry SDK as defined above.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'opentelemetry'
+	// Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+	// to `opentelemetry`.
+	// If another SDK, like a fork or a vendor-provided implementation, is
+	// used, this SDK MUST set the
+	// `telemetry.sdk.name` attribute to the fully-qualified class or module
+	// name of this SDK's main entry point
+	// or another suitable identifier depending on the language.
+	// The identifier `opentelemetry` is reserved and MUST NOT be used in this
+	// case.
+	// All custom identifiers SHOULD be stable across different versions of an
+	// implementation.
+	TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+	// TelemetrySDKVersionKey is the attribute Key conforming to the
+	// "telemetry.sdk.version" semantic conventions. It represents the version
+	// string of the telemetry SDK.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: '1.2.3'
+	TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+var (
+	// cpp
+	TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+	// dotnet
+	TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+	// erlang
+	TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+	// go
+	TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+	// java
+	TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+	// nodejs
+	TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+	// php
+	TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+	// python
+	TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+	// ruby
+	TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+	// rust
+	TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+	// swift
+	TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+	// webjs
+	TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+	return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+	return TelemetrySDKVersionKey.String(val)
+}
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+	// TelemetryDistroNameKey is the attribute Key conforming to the
+	// "telemetry.distro.name" semantic conventions. It represents the name of
+	// the auto instrumentation agent or distribution, if used.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'parts-unlimited-java'
+	// Note: Official auto instrumentation agents and distributions SHOULD set
+	// the `telemetry.distro.name` attribute to
+	// a string starting with `opentelemetry-`, e.g.
+	// `opentelemetry-java-instrumentation`.
+	TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+	// TelemetryDistroVersionKey is the attribute Key conforming to the
+	// "telemetry.distro.version" semantic conventions. It represents the
+	// version string of the auto instrumentation agent or distribution, if
+	// used.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.2.3'
+	TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
+)
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+	return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+	return TelemetryDistroVersionKey.String(val)
+}
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+	// WebEngineDescriptionKey is the attribute Key conforming to the
+	// "webengine.description" semantic conventions. It represents the
+	// additional description of the web engine (e.g. detailed version and
+	// edition information).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+	// 2.2.2.Final'
+	WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+	// WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+	// semantic conventions. It represents the name of the web engine.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'WildFly'
+	WebEngineNameKey = attribute.Key("webengine.name")
+
+	// WebEngineVersionKey is the attribute Key conforming to the
+	// "webengine.version" semantic conventions. It represents the version of
+	// the web engine.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '21.0.0'
+	WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+	return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+	return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+	return WebEngineVersionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+	// OTelScopeNameKey is the attribute Key conforming to the
+	// "otel.scope.name" semantic conventions. It represents the name of the
+	// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+	// OTelScopeVersionKey is the attribute Key conforming to the
+	// "otel.scope.version" semantic conventions. It represents the version of
+	// the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.0.0'
+	OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+	return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+	return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+	// OTelLibraryNameKey is the attribute Key conforming to the
+	// "otel.library.name" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	// Deprecated: use the `otel.scope.name` attribute.
+	OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+	// OTelLibraryVersionKey is the attribute Key conforming to the
+	// "otel.library.version" semantic conventions.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '1.0.0'
+	// Deprecated: use the `otel.scope.version` attribute.
+	OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions.
+//
+// Deprecated: use the `otel.scope.name` attribute.
+func OTelLibraryName(val string) attribute.KeyValue {
+	return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions.
+//
+// Deprecated: use the `otel.scope.version` attribute.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+	return OTelLibraryVersionKey.String(val)
+}
diff --git a/example/fib/fib.go b/semconv/v1.22.0/schema.go
similarity index 61%
rename from example/fib/fib.go
rename to semconv/v1.22.0/schema.go
index 817cc63b104..4b00ddee9b9 100644
--- a/example/fib/fib.go
+++ b/semconv/v1.22.0/schema.go
@@ -12,24 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-package main
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.22.0"
 
-import "fmt"
-
-// Fibonacci returns the n-th fibonacci number.
-func Fibonacci(n uint) (uint64, error) {
-	if n <= 1 {
-		return uint64(n), nil
-	}
-
-	if n > 93 {
-		return 0, fmt.Errorf("unsupported fibonacci number %d: too large", n)
-	}
-
-	var n2, n1 uint64 = 0, 1
-	for i := uint(2); i < n; i++ {
-		n2, n1 = n1, n1+n2
-	}
-
-	return n2 + n1, nil
-}
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.22.0"
diff --git a/semconv/v1.22.0/trace.go b/semconv/v1.22.0/trace.go
new file mode 100644
index 00000000000..243227a244b
--- /dev/null
+++ b/semconv/v1.22.0/trace.go
@@ -0,0 +1,2427 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.22.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+	// ExceptionMessageKey is the attribute Key conforming to the
+	// "exception.message" semantic conventions. It represents the exception
+	// message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Division by zero', "Can't convert 'int' object to str
+	// implicitly"
+	ExceptionMessageKey = attribute.Key("exception.message")
+
+	// ExceptionStacktraceKey is the attribute Key conforming to the
+	// "exception.stacktrace" semantic conventions. It represents a stacktrace
+	// as a string in the natural representation for the language runtime. The
+	// representation is to be determined and documented by each language SIG.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+	// exception\\n at '
+	//  'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+	//  'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+	//  'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+	// semantic conventions. It represents the type of the exception (its
+	// fully-qualified class name, if applicable). The dynamic type of the
+	// exception should be preferred over the static type in languages that
+	// support it.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'java.net.ConnectException', 'OSError'
+	ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+	return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+	return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+	return ExceptionTypeKey.String(val)
+}
+
+// Operations that access some remote service.
+const (
+	// PeerServiceKey is the attribute Key conforming to the "peer.service"
+	// semantic conventions. It represents the
+	// [`service.name`](/docs/resource/README.md#service) of the remote
+	// service. SHOULD be equal to the actual `service.name` resource attribute
+	// of the remote service if any.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'AuthTokenCache'
+	PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+	return PeerServiceKey.String(val)
+}
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+	// EnduserIDKey is the attribute Key conforming to the "enduser.id"
+	// semantic conventions. It represents the username or client_id extracted
+	// from the access token or
+	// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+	// in the inbound request from outside the system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'username'
+	EnduserIDKey = attribute.Key("enduser.id")
+
+	// EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+	// semantic conventions. It represents the actual/assumed role the client
+	// is making the request under extracted from token or application security
+	// context.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'admin'
+	EnduserRoleKey = attribute.Key("enduser.role")
+
+	// EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+	// semantic conventions. It represents the scopes or granted authorities
+	// the client currently possesses extracted from token or application
+	// security context. The value would come from the scope associated with an
+	// [OAuth 2.0 Access
+	// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+	// value in a [SAML 2.0
+	// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'read:message, write:files'
+	EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+	return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+	return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+	return EnduserScopeKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+	// ThreadDaemonKey is the attribute Key conforming to the "thread.daemon"
+	// semantic conventions. It represents the whether the thread is daemon or
+	// not.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	ThreadDaemonKey = attribute.Key("thread.daemon")
+
+	// ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+	// conventions. It represents the current "managed" thread ID (as opposed
+	// to OS thread ID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 42
+	ThreadIDKey = attribute.Key("thread.id")
+
+	// ThreadNameKey is the attribute Key conforming to the "thread.name"
+	// semantic conventions. It represents the current thread name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'main'
+	ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadDaemon returns an attribute KeyValue conforming to the
+// "thread.daemon" semantic conventions. It represents the whether the thread
+// is daemon or not.
+func ThreadDaemon(val bool) attribute.KeyValue {
+	return ThreadDaemonKey.Bool(val)
+}
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+	return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+	return ThreadNameKey.String(val)
+}
+
+// These attributes allow to report this unit of code and therefore to provide
+// more context about the span.
+const (
+	// CodeColumnKey is the attribute Key conforming to the "code.column"
+	// semantic conventions. It represents the column number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 16
+	CodeColumnKey = attribute.Key("code.column")
+
+	// CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+	// semantic conventions. It represents the source code file name that
+	// identifies the code unit as uniquely as possible (preferably an absolute
+	// file path).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '/usr/local/MyApplication/content_root/app/index.php'
+	CodeFilepathKey = attribute.Key("code.filepath")
+
+	// CodeFunctionKey is the attribute Key conforming to the "code.function"
+	// semantic conventions. It represents the method or function name, or
+	// equivalent (usually rightmost part of the code unit's name).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'serveRequest'
+	CodeFunctionKey = attribute.Key("code.function")
+
+	// CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+	// semantic conventions. It represents the line number in `code.filepath`
+	// best representing the operation. It SHOULD point within the code unit
+	// named in `code.function`.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 42
+	CodeLineNumberKey = attribute.Key("code.lineno")
+
+	// CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+	// semantic conventions. It represents the "namespace" within which
+	// `code.function` is defined. Usually the qualified class or module name,
+	// such that `code.namespace` + some separator + `code.function` form a
+	// unique identifier for the code unit.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'com.example.MyHTTPService'
+	CodeNamespaceKey = attribute.Key("code.namespace")
+)
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+	return CodeColumnKey.Int(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+	return CodeFilepathKey.String(val)
+}
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+	return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+	return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+	return CodeNamespaceKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+	// AWSLambdaInvokedARNKey is the attribute Key conforming to the
+	// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+	// invoked ARN as provided on the `Context` passed to the function
+	// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` applicable).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+	// Note: This may be different from `cloud.resource_id` if an alias is
+	// involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+	// CloudeventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the
+	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+	// uniquely identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+	CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+	// CloudeventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the
+	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+	// identifies the context in which an event happened.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'https://github.com/cloudevents',
+	// '/cloudevents/spec/pull/123', 'my-service'
+	CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+	// CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+	// "cloudevents.event_spec_version" semantic conventions. It represents the
+	// [version of the CloudEvents
+	// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+	// which the event uses.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '1.0'
+	CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+	// CloudeventsEventSubjectKey is the attribute Key conforming to the
+	// "cloudevents.event_subject" semantic conventions. It represents the
+	// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+	// of the event in the context of the event producer (identified by
+	// source).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'mynewfile.jpg'
+	CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+	// CloudeventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+	// contains a value describing the type of event related to the originating
+	// occurrence.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'com.github.pull_request.opened',
+	// 'com.example.object.deleted.v2'
+	CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+// uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+	return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+// identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+	return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+	return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+	return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+// contains a value describing the type of event related to the originating
+// occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+	return CloudeventsEventTypeKey.String(val)
+}
+
+// Semantic conventions for the OpenTracing Shim
+const (
+	// OpentracingRefTypeKey is the attribute Key conforming to the
+	// "opentracing.ref_type" semantic conventions. It represents the
+	// parent-child Reference type
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Note: The causal relationship between a child Span and a parent Span.
+	OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+	// The parent Span depends on the child Span in some capacity
+	OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+	// The parent Span does not depend in any way on the result of the child Span
+	OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+	// DBConnectionStringKey is the attribute Key conforming to the
+	// "db.connection_string" semantic conventions. It represents the
+	// connection string used to connect to the database. It is recommended to
+	// remove embedded credentials.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+	DBConnectionStringKey = attribute.Key("db.connection_string")
+
+	// DBJDBCDriverClassnameKey is the attribute Key conforming to the
+	// "db.jdbc.driver_classname" semantic conventions. It represents the
+	// fully-qualified class name of the [Java Database Connectivity
+	// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+	// driver used to connect.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'org.postgresql.Driver',
+	// 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+	DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+	// DBNameKey is the attribute Key conforming to the "db.name" semantic
+	// conventions. It represents the this attribute is used to report the name
+	// of the database being accessed. For commands that switch the database,
+	// this should be set to the target database (even if the command fails).
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If applicable.)
+	// Stability: experimental
+	// Examples: 'customers', 'main'
+	// Note: In some SQL databases, the database name to be used is called
+	// "schema name". In case there are multiple layers that could be
+	// considered for database name (e.g. Oracle instance name and schema
+	// name), the database name to be used is the more specific layer (e.g.
+	// Oracle schema name).
+	DBNameKey = attribute.Key("db.name")
+
+	// DBOperationKey is the attribute Key conforming to the "db.operation"
+	// semantic conventions. It represents the name of the operation being
+	// executed, e.g. the [MongoDB command
+	// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+	// such as `findAndModify`, or the SQL keyword.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If `db.statement` is not
+	// applicable.)
+	// Stability: experimental
+	// Examples: 'findAndModify', 'HMSET', 'SELECT'
+	// Note: When setting this to an SQL keyword, it is not recommended to
+	// attempt any client-side parsing of `db.statement` just to get this
+	// property, but it should be set if the operation name is provided by the
+	// library being instrumented. If the SQL statement has an ambiguous
+	// operation, or performs more than one operation, this value may be
+	// omitted.
+	DBOperationKey = attribute.Key("db.operation")
+
+	// DBStatementKey is the attribute Key conforming to the "db.statement"
+	// semantic conventions. It represents the database statement being
+	// executed.
+	//
+	// Type: string
+	// RequirementLevel: Recommended (Should be collected by default only if
+	// there is sanitization that excludes sensitive information.)
+	// Stability: experimental
+	// Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+	DBStatementKey = attribute.Key("db.statement")
+
+	// DBSystemKey is the attribute Key conforming to the "db.system" semantic
+	// conventions. It represents an identifier for the database management
+	// system (DBMS) product being used. See below for a list of well-known
+	// identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	DBSystemKey = attribute.Key("db.system")
+
+	// DBUserKey is the attribute Key conforming to the "db.user" semantic
+	// conventions. It represents the username for accessing the database.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'readonly_user', 'reporting_user'
+	DBUserKey = attribute.Key("db.user")
+)
+
+var (
+	// Some other SQL database. Fallback only. See notes
+	DBSystemOtherSQL = DBSystemKey.String("other_sql")
+	// Microsoft SQL Server
+	DBSystemMSSQL = DBSystemKey.String("mssql")
+	// Microsoft SQL Server Compact
+	DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+	// MySQL
+	DBSystemMySQL = DBSystemKey.String("mysql")
+	// Oracle Database
+	DBSystemOracle = DBSystemKey.String("oracle")
+	// IBM DB2
+	DBSystemDB2 = DBSystemKey.String("db2")
+	// PostgreSQL
+	DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+	// Amazon Redshift
+	DBSystemRedshift = DBSystemKey.String("redshift")
+	// Apache Hive
+	DBSystemHive = DBSystemKey.String("hive")
+	// Cloudscape
+	DBSystemCloudscape = DBSystemKey.String("cloudscape")
+	// HyperSQL DataBase
+	DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+	// Progress Database
+	DBSystemProgress = DBSystemKey.String("progress")
+	// SAP MaxDB
+	DBSystemMaxDB = DBSystemKey.String("maxdb")
+	// SAP HANA
+	DBSystemHanaDB = DBSystemKey.String("hanadb")
+	// Ingres
+	DBSystemIngres = DBSystemKey.String("ingres")
+	// FirstSQL
+	DBSystemFirstSQL = DBSystemKey.String("firstsql")
+	// EnterpriseDB
+	DBSystemEDB = DBSystemKey.String("edb")
+	// InterSystems Caché
+	DBSystemCache = DBSystemKey.String("cache")
+	// Adabas (Adaptable Database System)
+	DBSystemAdabas = DBSystemKey.String("adabas")
+	// Firebird
+	DBSystemFirebird = DBSystemKey.String("firebird")
+	// Apache Derby
+	DBSystemDerby = DBSystemKey.String("derby")
+	// FileMaker
+	DBSystemFilemaker = DBSystemKey.String("filemaker")
+	// Informix
+	DBSystemInformix = DBSystemKey.String("informix")
+	// InstantDB
+	DBSystemInstantDB = DBSystemKey.String("instantdb")
+	// InterBase
+	DBSystemInterbase = DBSystemKey.String("interbase")
+	// MariaDB
+	DBSystemMariaDB = DBSystemKey.String("mariadb")
+	// Netezza
+	DBSystemNetezza = DBSystemKey.String("netezza")
+	// Pervasive PSQL
+	DBSystemPervasive = DBSystemKey.String("pervasive")
+	// PointBase
+	DBSystemPointbase = DBSystemKey.String("pointbase")
+	// SQLite
+	DBSystemSqlite = DBSystemKey.String("sqlite")
+	// Sybase
+	DBSystemSybase = DBSystemKey.String("sybase")
+	// Teradata
+	DBSystemTeradata = DBSystemKey.String("teradata")
+	// Vertica
+	DBSystemVertica = DBSystemKey.String("vertica")
+	// H2
+	DBSystemH2 = DBSystemKey.String("h2")
+	// ColdFusion IMQ
+	DBSystemColdfusion = DBSystemKey.String("coldfusion")
+	// Apache Cassandra
+	DBSystemCassandra = DBSystemKey.String("cassandra")
+	// Apache HBase
+	DBSystemHBase = DBSystemKey.String("hbase")
+	// MongoDB
+	DBSystemMongoDB = DBSystemKey.String("mongodb")
+	// Redis
+	DBSystemRedis = DBSystemKey.String("redis")
+	// Couchbase
+	DBSystemCouchbase = DBSystemKey.String("couchbase")
+	// CouchDB
+	DBSystemCouchDB = DBSystemKey.String("couchdb")
+	// Microsoft Azure Cosmos DB
+	DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+	// Amazon DynamoDB
+	DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+	// Neo4j
+	DBSystemNeo4j = DBSystemKey.String("neo4j")
+	// Apache Geode
+	DBSystemGeode = DBSystemKey.String("geode")
+	// Elasticsearch
+	DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+	// Memcached
+	DBSystemMemcached = DBSystemKey.String("memcached")
+	// CockroachDB
+	DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+	// OpenSearch
+	DBSystemOpensearch = DBSystemKey.String("opensearch")
+	// ClickHouse
+	DBSystemClickhouse = DBSystemKey.String("clickhouse")
+	// Cloud Spanner
+	DBSystemSpanner = DBSystemKey.String("spanner")
+	// Trino
+	DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+	return DBConnectionStringKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+	return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the this attribute is used to report the name of
+// the database being accessed. For commands that switch the database, this
+// should be set to the target database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+	return DBNameKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+	return DBOperationKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+	return DBStatementKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+	return DBUserKey.String(val)
+}
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+	// DBMSSQLInstanceNameKey is the attribute Key conforming to the
+	// "db.mssql.instance_name" semantic conventions. It represents the
+	// Microsoft SQL Server [instance
+	// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+	// connecting to. This name is used to determine the port of a named
+	// instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'MSSQLSERVER'
+	// Note: If setting a `db.mssql.instance_name`, `server.port` is no longer
+	// required (but still recommended if non-standard).
+	DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// connecting to. This name is used to determine the port of a named instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+	return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+	// DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+	// "db.cassandra.consistency_level" semantic conventions. It represents the
+	// consistency level of the query. Based on consistency values from
+	// [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+	// DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.dc" semantic conventions. It represents the
+	// data center of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'us-west-2'
+	DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+
+	// DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+	// "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+	// of the coordinating node for a query.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+	DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+	// DBCassandraIdempotenceKey is the attribute Key conforming to the
+	// "db.cassandra.idempotence" semantic conventions. It represents
+	// whether or not the query is idempotent.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+	// DBCassandraPageSizeKey is the attribute Key conforming to the
+	// "db.cassandra.page_size" semantic conventions. It represents the fetch
+	// size used for paging, i.e. how many rows will be returned at once.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 5000
+	DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+	// DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+	// to the "db.cassandra.speculative_execution_count" semantic conventions.
+	// It represents the number of times a query was speculatively executed.
+	// Not set or `0` if the query was not executed speculatively.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 0, 2
+	DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+	// DBCassandraTableKey is the attribute Key conforming to the
+	// "db.cassandra.table" semantic conventions. It represents the name of the
+	// primary table that the operation is acting upon, including the keyspace
+	// name (if applicable).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'mytable'
+	// Note: This mirrors the db.sql.table attribute but references cassandra
+	// rather than sql. It is not recommended to attempt any client-side
+	// parsing of `db.statement` just to get this property, but it should be
+	// set if it is provided by the library being instrumented. If the
+	// operation is acting upon an anonymous table, or more than one table,
+	// this value MUST NOT be set.
+	DBCassandraTableKey = attribute.Key("db.cassandra.table")
+)
+
+var (
+	// all
+	DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+	// each_quorum
+	DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+	// quorum
+	DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+	// local_quorum
+	DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+	// one
+	DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+	// two
+	DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+	// three
+	DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+	// local_one
+	DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+	// any
+	DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+	// serial
+	DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+	// local_serial
+	DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+	return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+	return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+	return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+	return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+	return DBCassandraTableKey.String(val)
+}
+
+// Call-level attributes for Redis
+const (
+	// DBRedisDBIndexKey is the attribute Key conforming to the
+	// "db.redis.database_index" semantic conventions. It represents the index
+	// of the database being accessed as used in the [`SELECT`
+	// command](https://redis.io/commands/select), provided as an integer. To
+	// be used instead of the generic `db.name` attribute.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If other than the default
+	// database (`0`).)
+	// Stability: experimental
+	// Examples: 0, 1, 15
+	DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+	return DBRedisDBIndexKey.Int(val)
+}
+
+// Call-level attributes for MongoDB
+const (
+	// DBMongoDBCollectionKey is the attribute Key conforming to the
+	// "db.mongodb.collection" semantic conventions. It represents the
+	// collection being accessed within the database stated in `db.name`.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'customers', 'products'
+	DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the collection
+// being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+	return DBMongoDBCollectionKey.String(val)
+}
+
+// Call-level attributes for Elasticsearch
+const (
+	// DBElasticsearchClusterNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.cluster.name" semantic conventions. It represents the
+	// identifier of an Elasticsearch cluster.
+	//
+	// Type: string
+	// RequirementLevel: Recommended (When communicating with an Elastic Cloud
+	// deployment, this should be collected from the "X-Found-Handling-Cluster"
+	// HTTP response header.)
+	// Stability: experimental
+	// Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+	DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+	// DBElasticsearchNodeNameKey is the attribute Key conforming to the
+	// "db.elasticsearch.node.name" semantic conventions. It represents the
+	// human-readable identifier of the node/instance to which a
+	// request was routed.
+	//
+	// Type: string
+	// RequirementLevel: Recommended (When communicating with an Elastic Cloud
+	// deployment, this should be collected from the
+	// "X-Found-Handling-Instance" HTTP response header.)
+	// Stability: experimental
+	// Examples: 'instance-0000000001'
+	DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+)
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+	return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a
+// request was routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+	return DBElasticsearchNodeNameKey.String(val)
+}
+
+// Call-level attributes for SQL databases
+const (
+	// DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+	// semantic conventions. It represents the name of the primary table that
+	// the operation is acting upon, including the database name (if
+	// applicable).
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'public.users', 'customers'
+	// Note: It is not recommended to attempt any client-side parsing of
+	// `db.statement` just to get this property, but it should be set if it is
+	// provided by the library being instrumented. If the operation is acting
+	// upon an anonymous table, or more than one table, this value MUST NOT be
+	// set.
+	DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+	return DBSQLTableKey.String(val)
+}
+
+// Call-level attributes for Cosmos DB.
+const (
+	// DBCosmosDBClientIDKey is the attribute Key conforming to the
+	// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+	// Cosmos client instance id.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+	DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+	// DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+	// "db.cosmosdb.connection_mode" semantic conventions. It represents the
+	// cosmos client connection mode.
+	//
+	// Type: Enum
+	// RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as
+	// default))
+	// Stability: experimental
+	DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+	// DBCosmosDBContainerKey is the attribute Key conforming to the
+	// "db.cosmosdb.container" semantic conventions. It represents the cosmos
+	// DB container name.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (if available)
+	// Stability: experimental
+	// Examples: 'anystring'
+	DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
+
+	// DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+	// "db.cosmosdb.operation_type" semantic conventions. It represents the
+	// cosmosDB Operation Type.
+	//
+	// Type: Enum
+	// RequirementLevel: ConditionallyRequired (when performing one of the
+	// operations in this list)
+	// Stability: experimental
+	DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+	// DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+	// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+	// consumed for that operation.
+	//
+	// Type: double
+	// RequirementLevel: ConditionallyRequired (when available)
+	// Stability: experimental
+	// Examples: 46.18, 1.0
+	DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+
+	// DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+	// "db.cosmosdb.request_content_length" semantic conventions. It represents
+	// the request payload size in bytes
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+	// DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+	// DB status code.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (if response was received)
+	// Stability: experimental
+	// Examples: 200, 201
+	DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+	// DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+	// "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+	// cosmos DB sub status code.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (when response was received and
+	// contained sub-code.)
+	// Stability: experimental
+	// Examples: 1000, 1002
+	DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+)
+
+var (
+	// Gateway (HTTP) connections mode
+	DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+	// Direct connection
+	DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+var (
+	// invalid
+	DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+	// create
+	DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+	// patch
+	DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+	// read
+	DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+	// read_feed
+	DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+	// delete
+	DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+	// replace
+	DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+	// execute
+	DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+	// query
+	DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+	// head
+	DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+	// head_feed
+	DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+	// upsert
+	DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+	// batch
+	DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+	// query_plan
+	DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+	// execute_javascript
+	DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+	return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+	return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+// consumed for that operation.
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+	return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+	return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+	return DBCosmosDBSubStatusCodeKey.Int(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+	// OTelStatusCodeKey is the attribute Key conforming to the
+	// "otel.status_code" semantic conventions. It represents the name of the
+	// code, either "OK" or "ERROR". MUST NOT be set if the status code is
+	// UNSET.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+	// OTelStatusDescriptionKey is the attribute Key conforming to the
+	// "otel.status_description" semantic conventions. It represents the
+	// description of the Status if it has a value, otherwise not set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'resource not found'
+	OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+	// The operation has been validated by an Application developer or Operator to have completed successfully
+	OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+	// The operation contains an error
+	OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+	return OTelStatusDescriptionKey.String(val)
+}
+
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing of servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+	// FaaSInvocationIDKey is the attribute Key conforming to the
+	// "faas.invocation_id" semantic conventions. It represents the invocation
+	// ID of the current function invocation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+	FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+	return FaaSInvocationIDKey.String(val)
+}
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+	// FaaSDocumentCollectionKey is the attribute Key conforming to the
+	// "faas.document.collection" semantic conventions. It represents the name
+	// of the source on which the triggering operation was performed. For
+	// example, in Cloud Storage or S3 corresponds to the bucket name, and in
+	// Cosmos DB to the database name.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'myBucketName', 'myDBName'
+	FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+	// FaaSDocumentNameKey is the attribute Key conforming to the
+	// "faas.document.name" semantic conventions. It represents the document
+	// name/table subjected to the operation. For example, in Cloud Storage or
+	// S3 is the name of the file, and in Cosmos DB the table name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'myFile.txt', 'myTableName'
+	FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+	// FaaSDocumentOperationKey is the attribute Key conforming to the
+	// "faas.document.operation" semantic conventions. It represents the
+	// type of the operation that was performed on the data.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+	// FaaSDocumentTimeKey is the attribute Key conforming to the
+	// "faas.document.time" semantic conventions. It represents a string
+	// containing the time when the data was accessed in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+)
+
+var (
+	// When a new object is created
+	FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+	// When an object is modified
+	FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+	// When an object is deleted
+	FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+	return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+	return FaaSDocumentNameKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+	return FaaSDocumentTimeKey.String(val)
+}
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+	// FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+	// conventions. It represents a string containing the schedule period as
+	// [Cron
+	// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '0/5 * * * ? *'
+	FaaSCronKey = attribute.Key("faas.cron")
+
+	// FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+	// conventions. It represents a string containing the function invocation
+	// time in the [ISO
+	// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+	// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '2020-01-23T13:47:06Z'
+	FaaSTimeKey = attribute.Key("faas.time")
+)
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+	return FaaSCronKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+	return FaaSTimeKey.String(val)
+}
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+	// FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+	// semantic conventions. It represents a boolean that is true if the
+	// serverless function is executed for the first time (aka cold-start).
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+	return FaaSColdstartKey.Bool(val)
+}
+
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+	// AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+	// semantic conventions. It represents the AWS request ID as returned in
+	// the response headers `x-amz-request-id` or `x-amz-requestid`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+	AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+	return AWSRequestIDKey.String(val)
+}
+
+// Attributes that exist for multiple DynamoDB request types.
+const (
+	// AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+	// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+	// value of the `AttributesToGet` request parameter.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'lives', 'id'
+	AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+	// AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+	// "aws.dynamodb.consistent_read" semantic conventions. It represents the
+	// value of the `ConsistentRead` request parameter.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+	// AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+	// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+	// JSON-serialized value of each item in the `ConsumedCapacity` response
+	// field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+	// "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+	// { "CapacityUnits": number, "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+	// { "CapacityUnits": number, "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number }, "TableName": "string",
+	// "WriteCapacityUnits": number }'
+	AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+	// AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+	// "aws.dynamodb.index_name" semantic conventions. It represents the value
+	// of the `IndexName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'name_to_group'
+	AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+	// AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+	// the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+	// represents the JSON-serialized value of the `ItemCollectionMetrics`
+	// response field.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+	// blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+	// "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+	// "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+	// "SizeEstimateRangeGB": [ number ] } ] }'
+	AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+	// AWSDynamoDBLimitKey is the attribute Key conforming to the
+	// "aws.dynamodb.limit" semantic conventions. It represents the value of
+	// the `Limit` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 10
+	AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+	// AWSDynamoDBProjectionKey is the attribute Key conforming to the
+	// "aws.dynamodb.projection" semantic conventions. It represents the value
+	// of the `ProjectionExpression` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+	// RelatedItems, ProductReviews'
+	AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+	// AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+	// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+	// represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+	// request parameter.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1.0, 2.0
+	AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+	// AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+	// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+	// It represents the value of the
+	// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+	//
+	// Type: double
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 1.0, 2.0
+	AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+	// AWSDynamoDBSelectKey is the attribute Key conforming to the
+	// "aws.dynamodb.select" semantic conventions. It represents the value of
+	// the `Select` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'ALL_ATTRIBUTES', 'COUNT'
+	AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+
+	// AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_names" semantic conventions. It represents the keys
+	// in the `RequestItems` object field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Users', 'Cats'
+	AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+)
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+	return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+	return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+	return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+	return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+	return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+	return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+	return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+	return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+	return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+	return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+	return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// DynamoDB.CreateTable
+const (
+	// AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+	// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+	// represents the JSON-serialized value of each item of the
+	// `GlobalSecondaryIndexes` request field
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+	// "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+	// "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+	// "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+	// AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+	// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+	// represents the JSON-serialized value of each item of the
+	// `LocalSecondaryIndexes` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "IndexARN": "string", "IndexName": "string",
+	// "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+	AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+	return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// DynamoDB.ListTables
+const (
+	// AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+	// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+	// the value of the `ExclusiveStartTableName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Users', 'CatsTable'
+	AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+	// AWSDynamoDBTableCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.table_count" semantic conventions. It represents the
+	// number of items in the `TableNames` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 20
+	AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+	return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+	return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+	// AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+	// "aws.dynamodb.scan_forward" semantic conventions. It represents the
+	// value of the `ScanIndexForward` request parameter.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: experimental
+	AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+	return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+	// AWSDynamoDBCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.count" semantic conventions. It represents the value of
+	// the `Count` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 10
+	AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+	// AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+	// "aws.dynamodb.scanned_count" semantic conventions. It represents the
+	// value of the `ScannedCount` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 50
+	AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+	// AWSDynamoDBSegmentKey is the attribute Key conforming to the
+	// "aws.dynamodb.segment" semantic conventions. It represents the value of
+	// the `Segment` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 10
+	AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+	// AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+	// "aws.dynamodb.total_segments" semantic conventions. It represents the
+	// value of the `TotalSegments` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 100
+	AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+)
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+	return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+	return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+	return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+	return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// DynamoDB.UpdateTable
+const (
+	// AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+	// the "aws.dynamodb.attribute_definitions" semantic conventions. It
+	// represents the JSON-serialized value of each item in the
+	// `AttributeDefinitions` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+	// AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+	// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+	// conventions. It represents the JSON-serialized value of each item in
+	// the `GlobalSecondaryIndexUpdates` request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+	// "ProvisionedThroughput": { "ReadCapacityUnits": number,
+	// "WriteCapacityUnits": number } }'
+	AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+	return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+	return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Attributes that exist for S3 request types.
+const (
+	// AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+	// semantic conventions. It represents the S3 bucket name the request
+	// refers to. Corresponds to the `--bucket` parameter of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// operations.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'some-bucket-name'
+	// Note: The `bucket` attribute is applicable to all S3 operations that
+	// reference a bucket, i.e. that require the bucket name as a mandatory
+	// parameter.
+	// This applies to almost all S3 operations except `list-buckets`.
+	AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+	// AWSS3CopySourceKey is the attribute Key conforming to the
+	// "aws.s3.copy_source" semantic conventions. It represents the source
+	// object (in the form `bucket`/`key`) for the copy operation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'someFile.yml'
+	// Note: The `copy_source` attribute applies to S3 copy operations and
+	// corresponds to the `--copy-source` parameter
+	// of the [copy-object operation within the S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+	// This applies in particular to the following operations:
+	//
+	// -
+	// [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+	// -
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+	// AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+	// semantic conventions. It represents the delete request container that
+	// specifies the objects to be deleted.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples:
+	// 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+	// Note: The `delete` attribute is only applicable to the
+	// [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+	// operation.
+	// The `delete` attribute corresponds to the `--delete` parameter of the
+	// [delete-objects operation within the S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+	AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+	// AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+	// conventions. It represents the S3 object key the request refers to.
+	// Corresponds to the `--key` parameter of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// operations.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'someFile.yml'
+	// Note: The `key` attribute is applicable to all object-related S3
+	// operations, i.e. that require the object key as a mandatory parameter.
+	// This applies in particular to the following operations:
+	//
+	// -
+	// [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+	// -
+	// [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+	// -
+	// [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+	// -
+	// [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+	// -
+	// [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+	// -
+	// [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+	// -
+	// [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+	// -
+	// [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+	// -
+	// [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+	// -
+	// [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+	// -
+	// [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+	// -
+	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+	// -
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+	// AWSS3PartNumberKey is the attribute Key conforming to the
+	// "aws.s3.part_number" semantic conventions. It represents the part number
+	// of the part being uploaded in a multipart-upload operation. This is a
+	// positive integer between 1 and 10,000.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 3456
+	// Note: The `part_number` attribute is only applicable to the
+	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+	// and
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	// operations.
+	// The `part_number` attribute corresponds to the `--part-number` parameter
+	// of the
+	// [upload-part operation within the S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+	AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+
+	// AWSS3UploadIDKey is the attribute Key conforming to the
+	// "aws.s3.upload_id" semantic conventions. It represents the upload ID
+	// that identifies the multipart upload.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+	// Note: The `upload_id` attribute applies to S3 multipart-upload
+	// operations and corresponds to the `--upload-id` parameter
+	// of the [S3
+	// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+	// multipart operations.
+	// This applies in particular to the following operations:
+	//
+	// -
+	// [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+	// -
+	// [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+	// -
+	// [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+	// -
+	// [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+	// -
+	// [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+	AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+	return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+	return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+	return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+	return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+	return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+	return AWSS3UploadIDKey.String(val)
+}
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+	// GraphqlDocumentKey is the attribute Key conforming to the
+	// "graphql.document" semantic conventions. It represents the GraphQL
+	// document being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+	// Note: The value may be sanitized to exclude sensitive information.
+	GraphqlDocumentKey = attribute.Key("graphql.document")
+
+	// GraphqlOperationNameKey is the attribute Key conforming to the
+	// "graphql.operation.name" semantic conventions. It represents the name of
+	// the operation being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'findBookByID'
+	GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+	// GraphqlOperationTypeKey is the attribute Key conforming to the
+	// "graphql.operation.type" semantic conventions. It represents the type of
+	// the operation being executed.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'query', 'mutation', 'subscription'
+	GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+	// GraphQL query
+	GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+	// GraphQL mutation
+	GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+	// GraphQL subscription
+	GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+	return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+	return GraphqlOperationNameKey.String(val)
+}
+
+// General attributes used in messaging systems.
+const (
+	// MessagingBatchMessageCountKey is the attribute Key conforming to the
+	// "messaging.batch.message_count" semantic conventions. It represents the
+	// number of messages sent, received, or processed in the scope of the
+	// batching operation.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the span describes an
+	// operation on a batch of messages.)
+	// Stability: experimental
+	// Examples: 0, 1, 2
+	// Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+	// spans that operate with a single message. When a messaging client
+	// library supports both batch and single-message API for the same
+	// operation, instrumentations SHOULD use `messaging.batch.message_count`
+	// for batching APIs and SHOULD NOT use it for single-message APIs.
+	MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+	// MessagingClientIDKey is the attribute Key conforming to the
+	// "messaging.client_id" semantic conventions. It represents a unique
+	// identifier for the client that consumes or produces a message.
+	//
+	// Type: string
+	// RequirementLevel: Recommended (If a client id is available)
+	// Stability: experimental
+	// Examples: 'client-5', 'myhost@8742@s8083jm'
+	MessagingClientIDKey = attribute.Key("messaging.client_id")
+
+	// MessagingOperationKey is the attribute Key conforming to the
+	// "messaging.operation" semantic conventions. It represents a string
+	// identifying the kind of messaging operation as defined in the [Operation
+	// names](#operation-names) section above.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Note: If a custom value is used, it MUST be of low cardinality.
+	MessagingOperationKey = attribute.Key("messaging.operation")
+
+	// MessagingSystemKey is the attribute Key conforming to the
+	// "messaging.system" semantic conventions. It represents a string
+	// identifying the messaging system.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: experimental
+	// Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+	MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+var (
+	// publish
+	MessagingOperationPublish = MessagingOperationKey.String("publish")
+	// receive
+	MessagingOperationReceive = MessagingOperationKey.String("receive")
+	// process
+	MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+	return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client_id" semantic conventions. It represents a unique
+// identifier for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+	return MessagingClientIDKey.String(val)
+}
+
+// MessagingSystem returns an attribute KeyValue conforming to the
+// "messaging.system" semantic conventions. It represents a string identifying
+// the messaging system.
+func MessagingSystem(val string) attribute.KeyValue {
+	return MessagingSystemKey.String(val)
+}
+
+// Semantic conventions for remote procedure calls.
+const (
+	// RPCMethodKey is the attribute Key conforming to the "rpc.method"
+	// semantic conventions. It represents the name of the (logical) method
+	// being called, must be equal to the $method part in the span name.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'exampleMethod'
+	// Note: This is the logical name of the method from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// method/function. The `code.function` attribute may be used to store the
+	// latter (e.g., method actually executing the call on the server side, RPC
+	// client stub method on the client side).
+	RPCMethodKey = attribute.Key("rpc.method")
+
+	// RPCServiceKey is the attribute Key conforming to the "rpc.service"
+	// semantic conventions. It represents the full (logical) name of the
+	// service being called, including its package name, if applicable.
+	//
+	// Type: string
+	// RequirementLevel: Recommended
+	// Stability: experimental
+	// Examples: 'myservice.EchoService'
+	// Note: This is the logical name of the service from the RPC interface
+	// perspective, which can be different from the name of any implementing
+	// class. The `code.namespace` attribute may be used to store the latter
+	// (despite the attribute name, it may include a class name; e.g., class
+	// with method actually executing the call on the server side, RPC client
+	// stub class on the client side).
+	RPCServiceKey = attribute.Key("rpc.service")
+
+	// RPCSystemKey is the attribute Key conforming to the "rpc.system"
+	// semantic conventions. It represents a string identifying the remoting
+	// system. See below for a list of well-known identifiers.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	RPCSystemKey = attribute.Key("rpc.system")
+)
+
+var (
+	// gRPC
+	RPCSystemGRPC = RPCSystemKey.String("grpc")
+	// Java RMI
+	RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+	// .NET WCF
+	RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+	// Apache Dubbo
+	RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+	// Connect RPC
+	RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+	return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+	return RPCServiceKey.String(val)
+}
+
+// Tech-specific attributes for gRPC.
+const (
+	// RPCGRPCStatusCodeKey is the attribute Key conforming to the
+	// "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+	// status
+	// code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+	// the gRPC request.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: experimental
+	RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+	// OK
+	RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+	// CANCELLED
+	RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+	// UNKNOWN
+	RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+	// INVALID_ARGUMENT
+	RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+	// DEADLINE_EXCEEDED
+	RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+	// NOT_FOUND
+	RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+	// ALREADY_EXISTS
+	RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+	// PERMISSION_DENIED
+	RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+	// RESOURCE_EXHAUSTED
+	RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+	// FAILED_PRECONDITION
+	RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+	// ABORTED
+	RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+	// OUT_OF_RANGE
+	RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+	// UNIMPLEMENTED
+	RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+	// INTERNAL
+	RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+	// UNAVAILABLE
+	RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+	// DATA_LOSS
+	RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+	// UNAUTHENTICATED
+	RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+	// RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+	// `error.code` property of response if it is an error response.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If response is not successful.)
+	// Stability: experimental
+	// Examples: -32700, 100
+	RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+	// RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+	// `error.message` property of response if it is an error response.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: 'Parse error', 'User already exists'
+	RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+	// RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+	// property of request or response. Since protocol allows id to be int,
+	// string, `null` or missing (for notifications), value is expected to be
+	// cast to string for simplicity. Use empty string in case of `null` value.
+	// Omit entirely if this is a notification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: experimental
+	// Examples: '10', 'request-7', ''
+	RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+	// RPCJsonrpcVersionKey is the attribute Key conforming to the
+	// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+	// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+	// does not specify this, the value can be omitted.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If other than the default
+	// version (`1.0`))
+	// Stability: experimental
+	// Examples: '2.0', '1.0'
+	RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+)
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+	return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+	return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since protocol allows id to be int, string,
+// `null` or missing (for notifications), value is expected to be cast to
+// string for simplicity. Use empty string in case of `null` value. Omit
+// entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+	return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// does not specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+	return RPCJsonrpcVersionKey.String(val)
+}
+
+// Tech-specific attributes for Connect RPC.
+const (
+	// RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+	// "rpc.connect_rpc.error_code" semantic conventions. It represents the
+	// [error codes](https://connect.build/docs/protocol/#error-codes) of the
+	// Connect request. Error codes are always string values.
+	//
+	// Type: Enum
+	// RequirementLevel: ConditionallyRequired (If response is not successful
+	// and if error code available.)
+	// Stability: experimental
+	RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+)
+
+var (
+	// cancelled
+	RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+	// unknown
+	RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+	// invalid_argument
+	RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+	// deadline_exceeded
+	RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+	// not_found
+	RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+	// already_exists
+	RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+	// permission_denied
+	RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+	// resource_exhausted
+	RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+	// failed_precondition
+	RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+	// aborted
+	RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+	// out_of_range
+	RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+	// unimplemented
+	RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+	// internal
+	RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+	// unavailable
+	RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+	// data_loss
+	RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+	// unauthenticated
+	RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
diff --git a/trace/config.go b/trace/config.go
index cb3efbb9ad8..3aadc66cf7a 100644
--- a/trace/config.go
+++ b/trace/config.go
@@ -268,6 +268,7 @@ func (o stackTraceOption) applyEvent(c EventConfig) EventConfig {
 	c.stackTrace = bool(o)
 	return c
 }
+
 func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig {
 	c.stackTrace = bool(o)
 	return c
diff --git a/trace/doc.go b/trace/doc.go
index ab0346f9664..440f3d7565a 100644
--- a/trace/doc.go
+++ b/trace/doc.go
@@ -62,5 +62,69 @@ a default.
 		defer span.End()
 		// ...
 	}
+
+# API Implementations
+
+This package does not conform to the standard Go versioning policy; all of its
+interfaces may have methods added to them without a package major version bump.
+This non-standard API evolution could surprise an uninformed implementation
+author. They could unknowingly build their implementation in a way that would
+result in a runtime panic for their users that update to the new API.
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+  - Compilation failure
+  - Panic
+  - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/trace/embedded] in their implementation. For
+example,
+
+	import "go.opentelemetry.io/otel/trace/embedded"
+
+	type TracerProvider struct {
+		embedded.TracerProvider
+		// ...
+	}
+
+If an author wants the default behavior of their implementations to panic, they
+can embed the API interface directly.
+
+	import "go.opentelemetry.io/otel/trace"
+
+	type TracerProvider struct {
+		trace.TracerProvider
+		// ...
+	}
+
+This option is not recommended. It will lead to publishing packages that
+contain runtime panics when users update to newer versions of
+[go.opentelemetry.io/otel/trace], which may be done with a transitive
+dependency.
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/trace/noop]:
+
+	import "go.opentelemetry.io/otel/trace/noop"
+
+	type TracerProvider struct {
+		noop.TracerProvider
+		// ...
+	}
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
 */
 package trace // import "go.opentelemetry.io/otel/trace"
diff --git a/trace/embedded/embedded.go b/trace/embedded/embedded.go
new file mode 100644
index 00000000000..898db5a7546
--- /dev/null
+++ b/trace/embedded/embedded.go
@@ -0,0 +1,56 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry
+// trace API].
+//
+// Implementers of the [OpenTelemetry trace API] can embed the relevant type
+// from this package into their implementation directly. Doing so will result
+// in a compilation error for users when the [OpenTelemetry trace API] is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+//
+// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace
+package embedded // import "go.opentelemetry.io/otel/trace/embedded"
+
+// TracerProvider is embedded in
+// [go.opentelemetry.io/otel/trace.TracerProvider].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type TracerProvider interface{ tracerProvider() }
+
+// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a
+// compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface
+// is extended (which is something that can happen without a major version bump
+// of the API package).
+type Tracer interface{ tracer() }
+
+// Span is embedded in [go.opentelemetry.io/otel/trace.Span].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a
+// compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Span interface{ span() }
diff --git a/trace/go.mod b/trace/go.mod
index 9b6953dcb8f..4dda77a8cb4 100644
--- a/trace/go.mod
+++ b/trace/go.mod
@@ -1,13 +1,13 @@
 module go.opentelemetry.io/otel/trace
 
-go 1.19
+go 1.20
 
 replace go.opentelemetry.io/otel => ../
 
 require (
-	github.com/google/go-cmp v0.5.9
+	github.com/google/go-cmp v0.6.0
 	github.com/stretchr/testify v1.8.4
-	go.opentelemetry.io/otel v1.16.0
+	go.opentelemetry.io/otel v1.21.0
 )
 
 require (
diff --git a/trace/go.sum b/trace/go.sum
index 6b8b46ca828..55961e4a867 100644
--- a/trace/go.sum
+++ b/trace/go.sum
@@ -1,7 +1,7 @@
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
diff --git a/trace/noop.go b/trace/noop.go
index 7cf6c7f3ef9..c125491caeb 100644
--- a/trace/noop.go
+++ b/trace/noop.go
@@ -19,16 +19,20 @@ import (
 
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace/embedded"
 )
 
 // NewNoopTracerProvider returns an implementation of TracerProvider that
 // performs no operations. The Tracer and Spans created from the returned
 // TracerProvider also perform no operations.
+//
+// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider]
+// instead.
 func NewNoopTracerProvider() TracerProvider {
 	return noopTracerProvider{}
 }
 
-type noopTracerProvider struct{}
+type noopTracerProvider struct{ embedded.TracerProvider }
 
 var _ TracerProvider = noopTracerProvider{}
 
@@ -38,7 +42,7 @@ func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
 }
 
 // noopTracer is an implementation of Tracer that performs no operations.
-type noopTracer struct{}
+type noopTracer struct{ embedded.Tracer }
 
 var _ Tracer = noopTracer{}
 
@@ -54,7 +58,7 @@ func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption
 }
 
 // noopSpan is an implementation of Span that performs no operations.
-type noopSpan struct{}
+type noopSpan struct{ embedded.Span }
 
 var _ Span = noopSpan{}
 
diff --git a/trace/noop/noop.go b/trace/noop/noop.go
new file mode 100644
index 00000000000..7f485543c47
--- /dev/null
+++ b/trace/noop/noop.go
@@ -0,0 +1,118 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package noop provides an implementation of the OpenTelemetry trace API that
+// produces no telemetry and minimizes used computation resources.
+//
+// Using this package to implement the OpenTelemetry trace API will effectively
+// disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry trace API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/trace/noop"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+)
+
+var (
+	// Compile-time check this implements the OpenTelemetry API.
+
+	_ trace.TracerProvider = TracerProvider{}
+	_ trace.Tracer         = Tracer{}
+	_ trace.Span           = Span{}
+)
+
+// TracerProvider is an OpenTelemetry No-Op TracerProvider.
+type TracerProvider struct{ embedded.TracerProvider }
+
+// NewTracerProvider returns a TracerProvider that does not record any telemetry.
+func NewTracerProvider() TracerProvider {
+	return TracerProvider{}
+}
+
+// Tracer returns an OpenTelemetry Tracer that does not record any telemetry.
+func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer {
+	return Tracer{}
+}
+
+// Tracer is an OpenTelemetry No-Op Tracer.
+type Tracer struct{ embedded.Tracer }
+
+// Start creates a span. The created span will be set in a child context of ctx
+// and returned with the span.
+//
+// If ctx contains a span context, the returned span will also contain that
+// span context. If the span context in ctx is for a non-recording span, that
+// span instance will be returned directly.
+func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
+	span := trace.SpanFromContext(ctx)
+
+	// If the parent context contains a non-zero span context, that span
+	// context needs to be returned as a non-recording span
+	// (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk).
+	var zeroSC trace.SpanContext
+	if sc := span.SpanContext(); !sc.Equal(zeroSC) {
+		if !span.IsRecording() {
+			// If the span is not recording return it directly.
+			return ctx, span
+		}
+		// Otherwise, the span context needs to be returned in a non-recording span.
+		span = Span{sc: sc}
+	} else {
+		// No parent, return a No-Op span with an empty span context.
+		span = Span{}
+	}
+	return trace.ContextWithSpan(ctx, span), span
+}
+
+// Span is an OpenTelemetry No-Op Span.
+type Span struct {
+	embedded.Span
+
+	sc trace.SpanContext
+}
+
+// SpanContext returns the SpanContext of the Span.
+func (s Span) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (Span) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (Span) SetStatus(codes.Code, string) {}
+
+// SetAttributes does nothing.
+func (Span) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (Span) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (Span) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (Span) AddEvent(string, ...trace.EventOption) {}
+
+// SetName does nothing.
+func (Span) SetName(string) {}
+
+// TracerProvider returns a No-Op TracerProvider.
+func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} }
diff --git a/trace/noop/noop_test.go b/trace/noop/noop_test.go
new file mode 100644
index 00000000000..2b0f7818e20
--- /dev/null
+++ b/trace/noop/noop_test.go
@@ -0,0 +1,117 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package noop // import "go.opentelemetry.io/otel/trace/noop"
+
+import (
+	"context"
+	"reflect"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+func TestImplementationNoPanics(t *testing.T) {
+	// Check that if type has an embedded interface and that interface has
+	// methods added to it then the No-Op implementation implements them.
+	t.Run("TracerProvider", assertAllExportedMethodNoPanic(
+		reflect.ValueOf(TracerProvider{}),
+		reflect.TypeOf((*trace.TracerProvider)(nil)).Elem(),
+	))
+	t.Run("Tracer", assertAllExportedMethodNoPanic(
+		reflect.ValueOf(Tracer{}),
+		reflect.TypeOf((*trace.Tracer)(nil)).Elem(),
+	))
+	t.Run("Span", assertAllExportedMethodNoPanic(
+		reflect.ValueOf(Span{}),
+		reflect.TypeOf((*trace.Span)(nil)).Elem(),
+	))
+}
+
+func assertAllExportedMethodNoPanic(rVal reflect.Value, rType reflect.Type) func(*testing.T) {
+	return func(t *testing.T) {
+		for n := 0; n < rType.NumMethod(); n++ {
+			mType := rType.Method(n)
+			if !mType.IsExported() {
+				t.Logf("ignoring unexported %s", mType.Name)
+				continue
+			}
+			m := rVal.MethodByName(mType.Name)
+			if !m.IsValid() {
+				t.Errorf("unknown method for %s: %s", rVal.Type().Name(), mType.Name)
+			}
+
+			numIn := mType.Type.NumIn()
+			if mType.Type.IsVariadic() {
+				numIn--
+			}
+			args := make([]reflect.Value, numIn)
+			ctx := context.Background()
+			for i := range args {
+				aType := mType.Type.In(i)
+				if aType.Name() == "Context" {
+					// Do not panic on a nil context.
+					args[i] = reflect.ValueOf(ctx)
+				} else {
+					args[i] = reflect.New(aType).Elem()
+				}
+			}
+
+			assert.NotPanicsf(t, func() {
+				_ = m.Call(args)
+			}, "%s.%s", rVal.Type().Name(), mType.Name)
+		}
+	}
+}
+
+func TestNewTracerProvider(t *testing.T) {
+	tp := NewTracerProvider()
+	assert.Equal(t, tp, TracerProvider{})
+	tracer := tp.Tracer("")
+	assert.Equal(t, tracer, Tracer{})
+}
+
+func TestTracerStartPropagatesSpanContext(t *testing.T) {
+	tracer := NewTracerProvider().Tracer("")
+	spanCtx := trace.SpanContext{}
+
+	ctx := trace.ContextWithSpanContext(context.Background(), spanCtx)
+	ctx, span := tracer.Start(ctx, "test_span")
+	assert.Equal(t, spanCtx, trace.SpanContextFromContext(ctx), "empty span context not set in context")
+	assert.IsType(t, Span{}, span, "non-noop span returned")
+	assert.Equal(t, spanCtx, span.SpanContext(), "empty span context not returned from span")
+	assert.False(t, span.IsRecording(), "empty span context returned recording span")
+
+	spanCtx = spanCtx.WithTraceID(trace.TraceID([16]byte{1}))
+	spanCtx = spanCtx.WithSpanID(trace.SpanID([8]byte{1}))
+	ctx = trace.ContextWithSpanContext(context.Background(), spanCtx)
+	ctx, span = tracer.Start(ctx, "test_span")
+	assert.Equal(t, spanCtx, trace.SpanContextFromContext(ctx), "non-empty span context not set in context")
+	assert.Equal(t, spanCtx, span.SpanContext(), "non-empty span context not returned from span")
+	assert.False(t, span.IsRecording(), "non-empty span context returned recording span")
+
+	rSpan := recordingSpan{Span: Span{sc: spanCtx}}
+	ctx = trace.ContextWithSpan(context.Background(), rSpan)
+	ctx, span = tracer.Start(ctx, "test_span")
+	assert.Equal(t, spanCtx, trace.SpanContextFromContext(ctx), "recording span's span context not set in context")
+	assert.IsType(t, Span{}, span, "non-noop span returned")
+	assert.Equal(t, spanCtx, span.SpanContext(), "recording span's span context not returned from span")
+	assert.False(t, span.IsRecording(), "recording span returned")
+}
+
+type recordingSpan struct{ Span }
+
+func (recordingSpan) IsRecording() bool { return true }
diff --git a/trace/trace.go b/trace/trace.go
index 4aa94f79f46..26a4b2260ec 100644
--- a/trace/trace.go
+++ b/trace/trace.go
@@ -22,6 +22,7 @@ import (
 
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace/embedded"
 )
 
 const (
@@ -48,8 +49,10 @@ func (e errorConst) Error() string {
 // nolint:revive // revive complains about stutter of `trace.TraceID`.
 type TraceID [16]byte
 
-var nilTraceID TraceID
-var _ json.Marshaler = nilTraceID
+var (
+	nilTraceID TraceID
+	_          json.Marshaler = nilTraceID
+)
 
 // IsValid checks whether the trace TraceID is valid. A valid trace ID does
 // not consist of zeros only.
@@ -71,8 +74,10 @@ func (t TraceID) String() string {
 // SpanID is a unique identity of a span in a trace.
 type SpanID [8]byte
 
-var nilSpanID SpanID
-var _ json.Marshaler = nilSpanID
+var (
+	nilSpanID SpanID
+	_         json.Marshaler = nilSpanID
+)
 
 // IsValid checks whether the SpanID is valid. A valid SpanID does not consist
 // of zeros only.
@@ -338,8 +343,15 @@ func (sc SpanContext) MarshalJSON() ([]byte, error) {
 // create a Span and it is then up to the operation the Span represents to
 // properly end the Span when the operation itself ends.
 //
-// Warning: methods may be added to this interface in minor releases.
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
 type Span interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Span
+
 	// End completes the Span. The Span is considered complete and ready to be
 	// delivered through the rest of the telemetry pipeline after this method
 	// is called. Therefore, updates to the Span are not allowed after this
@@ -486,8 +498,15 @@ func (sk SpanKind) String() string {
 
 // Tracer is the creator of Spans.
 //
-// Warning: methods may be added to this interface in minor releases.
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
 type Tracer interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.Tracer
+
 	// Start creates a span and a context.Context containing the newly-created span.
 	//
 	// If the context.Context provided in `ctx` contains a Span then the newly-created
@@ -518,8 +537,15 @@ type Tracer interface {
 // at runtime from its users or it can simply use the globally registered one
 // (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
 //
-// Warning: methods may be added to this interface in minor releases.
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
 type TracerProvider interface {
+	// Users of the interface can ignore this. This embedded type is only used
+	// by implementations of this interface. See the "API Implementations"
+	// section of the package documentation for more information.
+	embedded.TracerProvider
+
 	// Tracer returns a unique Tracer scoped to be used by instrumentation code
 	// to trace computational workflows. The scope and identity of that
 	// instrumentation code is uniquely defined by the name and options passed.
diff --git a/trace/trace_test.go b/trace/trace_test.go
index 42003822037..9bde9d8c79b 100644
--- a/trace/trace_test.go
+++ b/trace/trace_test.go
@@ -309,7 +309,7 @@ func TestSpanContextHasTraceID(t *testing.T) {
 		},
 	} {
 		t.Run(testcase.name, func(t *testing.T) {
-			//proto: func (sc SpanContext) HasTraceID() bool{}
+			// proto: func (sc SpanContext) HasTraceID() bool{}
 			sc := SpanContext{traceID: testcase.tid}
 			have := sc.HasTraceID()
 			if have != testcase.want {
@@ -336,7 +336,7 @@ func TestSpanContextHasSpanID(t *testing.T) {
 		},
 	} {
 		t.Run(testcase.name, func(t *testing.T) {
-			//proto: func (sc SpanContext) HasSpanID() bool {}
+			// proto: func (sc SpanContext) HasSpanID() bool {}
 			have := testcase.sc.HasSpanID()
 			if have != testcase.want {
 				t.Errorf("Want: %v, but have: %v", testcase.want, have)
@@ -435,7 +435,7 @@ func TestStringTraceID(t *testing.T) {
 		},
 	} {
 		t.Run(testcase.name, func(t *testing.T) {
-			//proto: func (t TraceID) String() string {}
+			// proto: func (t TraceID) String() string {}
 			have := testcase.tid.String()
 			if have != testcase.want {
 				t.Errorf("Want: %s, but have: %s", testcase.want, have)
@@ -462,7 +462,7 @@ func TestStringSpanID(t *testing.T) {
 		},
 	} {
 		t.Run(testcase.name, func(t *testing.T) {
-			//proto: func (t TraceID) String() string {}
+			// proto: func (t TraceID) String() string {}
 			have := testcase.sid.String()
 			if have != testcase.want {
 				t.Errorf("Want: %s, but have: %s", testcase.want, have)
@@ -481,17 +481,14 @@ func TestValidateSpanKind(t *testing.T) {
 			SpanKindInternal,
 		},
 		{
-
 			SpanKindInternal,
 			SpanKindInternal,
 		},
 		{
-
 			SpanKindServer,
 			SpanKindServer,
 		},
 		{
-
 			SpanKindClient,
 			SpanKindClient,
 		},
@@ -521,17 +518,14 @@ func TestSpanKindString(t *testing.T) {
 			"unspecified",
 		},
 		{
-
 			SpanKindInternal,
 			"internal",
 		},
 		{
-
 			SpanKindServer,
 			"server",
 		},
 		{
-
 			SpanKindClient,
 			"client",
 		},
diff --git a/trace/tracestate.go b/trace/tracestate.go
index ca68a82e5f7..db936ba5b73 100644
--- a/trace/tracestate.go
+++ b/trace/tracestate.go
@@ -17,20 +17,14 @@ package trace // import "go.opentelemetry.io/otel/trace"
 import (
 	"encoding/json"
 	"fmt"
-	"regexp"
 	"strings"
 )
 
 const (
 	maxListMembers = 32
 
-	listDelimiter = ","
-
-	// based on the W3C Trace Context specification, see
-	// https://www.w3.org/TR/trace-context-1/#tracestate-header
-	noTenantKeyFormat   = `[a-z][_0-9a-z\-\*\/]{0,255}`
-	withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}`
-	valueFormat         = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
+	listDelimiters  = ","
+	memberDelimiter = "="
 
 	errInvalidKey    errorConst = "invalid tracestate key"
 	errInvalidValue  errorConst = "invalid tracestate value"
@@ -39,43 +33,138 @@ const (
 	errDuplicate     errorConst = "duplicate list-member in tracestate"
 )
 
-var (
-	keyRe    = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`)
-	valueRe  = regexp.MustCompile(`^(` + valueFormat + `)$`)
-	memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`)
-)
-
 type member struct {
 	Key   string
 	Value string
 }
 
+// according to (chr = %x20 / (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) )
+// means (chr = %x20-2B / %x2D-3C / %x3E-7E) .
+func checkValueChar(v byte) bool {
+	return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) .
+func checkValueLast(v byte) bool {
+	return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// based on the W3C Trace Context specification
+//
+//	value    = (0*255(chr)) nblk-chr
+//	nblk-chr = %x21-2B / %x2D-3C / %x3E-7E
+//	chr      = %x20 / nblk-chr
+//
+// see https://www.w3.org/TR/trace-context-1/#value
+func checkValue(val string) bool {
+	n := len(val)
+	if n == 0 || n > 256 {
+		return false
+	}
+	for i := 0; i < n-1; i++ {
+		if !checkValueChar(val[i]) {
+			return false
+		}
+	}
+	return checkValueLast(val[n-1])
+}
+
+func checkKeyRemain(key string) bool {
+	// ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+	for _, v := range key {
+		if isAlphaNum(byte(v)) {
+			continue
+		}
+		switch v {
+		case '_', '-', '*', '/':
+			continue
+		}
+		return false
+	}
+	return true
+}
+
+// according to
+//
+//	simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//
+// param n is the remaining part length, should be 255 in simple-key or 13 in system-id.
+func checkKeyPart(key string, n int) bool {
+	if len(key) == 0 {
+		return false
+	}
+	first := key[0] // key's first char
+	ret := len(key[1:]) <= n
+	ret = ret && first >= 'a' && first <= 'z'
+	return ret && checkKeyRemain(key[1:])
+}
+
+func isAlphaNum(c byte) bool {
+	if c >= 'a' && c <= 'z' {
+		return true
+	}
+	return c >= '0' && c <= '9'
+}
+
+// according to
+//
+//	tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+//
+// param n is the remaining part length, should be 240 exactly.
+func checkKeyTenant(key string, n int) bool {
+	if len(key) == 0 {
+		return false
+	}
+	return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
+}
+
+// based on the W3C Trace Context specification
+//
+//	key = simple-key / multi-tenant-key
+//	simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	multi-tenant-key = tenant-id "@" system-id
+//	tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//	lcalpha    = %x61-7A ; a-z
+//
+// see https://www.w3.org/TR/trace-context-1/#tracestate-header.
+func checkKey(key string) bool {
+	tenant, system, ok := strings.Cut(key, "@")
+	if !ok {
+		return checkKeyPart(key, 255)
+	}
+	return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13)
+}
+
 func newMember(key, value string) (member, error) {
-	if !keyRe.MatchString(key) {
-		return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
+	if !checkKey(key) {
+		return member{}, errInvalidKey
 	}
-	if !valueRe.MatchString(value) {
-		return member{}, fmt.Errorf("%w: %s", errInvalidValue, value)
+	if !checkValue(value) {
+		return member{}, errInvalidValue
 	}
 	return member{Key: key, Value: value}, nil
 }
 
 func parseMember(m string) (member, error) {
-	matches := memberRe.FindStringSubmatch(m)
-	if len(matches) != 5 {
+	key, val, ok := strings.Cut(m, memberDelimiter)
+	if !ok {
 		return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
 	}
-
-	return member{
-		Key:   matches[1],
-		Value: matches[4],
-	}, nil
+	key = strings.TrimLeft(key, " \t")
+	val = strings.TrimRight(val, " \t")
+	result, e := newMember(key, val)
+	if e != nil {
+		return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
+	}
+	return result, nil
 }
 
 // String encodes member into a string compliant with the W3C Trace Context
 // specification.
 func (m member) String() string {
-	return fmt.Sprintf("%s=%s", m.Key, m.Value)
+	return m.Key + "=" + m.Value
 }
 
 // TraceState provides additional vendor-specific trace identification
@@ -99,8 +188,8 @@ var _ json.Marshaler = TraceState{}
 // ParseTraceState attempts to decode a TraceState from the passed
 // string. It returns an error if the input is invalid according to the W3C
 // Trace Context specification.
-func ParseTraceState(tracestate string) (TraceState, error) {
-	if tracestate == "" {
+func ParseTraceState(ts string) (TraceState, error) {
+	if ts == "" {
 		return TraceState{}, nil
 	}
 
@@ -110,7 +199,9 @@ func ParseTraceState(tracestate string) (TraceState, error) {
 
 	var members []member
 	found := make(map[string]struct{})
-	for _, memberStr := range strings.Split(tracestate, listDelimiter) {
+	for ts != "" {
+		var memberStr string
+		memberStr, ts, _ = strings.Cut(ts, listDelimiters)
 		if len(memberStr) == 0 {
 			continue
 		}
@@ -143,11 +234,29 @@ func (ts TraceState) MarshalJSON() ([]byte, error) {
 // Trace Context specification. The returned string will be invalid if the
 // TraceState contains any invalid members.
 func (ts TraceState) String() string {
-	members := make([]string, len(ts.list))
-	for i, m := range ts.list {
-		members[i] = m.String()
+	if len(ts.list) == 0 {
+		return ""
+	}
+	var n int
+	n += len(ts.list)     // member delimiters: '='
+	n += len(ts.list) - 1 // list delimiters: ','
+	for _, mem := range ts.list {
+		n += len(mem.Key)
+		n += len(mem.Value)
 	}
-	return strings.Join(members, listDelimiter)
+
+	var sb strings.Builder
+	sb.Grow(n)
+	_, _ = sb.WriteString(ts.list[0].Key)
+	_ = sb.WriteByte('=')
+	_, _ = sb.WriteString(ts.list[0].Value)
+	for i := 1; i < len(ts.list); i++ {
+		_ = sb.WriteByte(listDelimiters[0])
+		_, _ = sb.WriteString(ts.list[i].Key)
+		_ = sb.WriteByte('=')
+		_, _ = sb.WriteString(ts.list[i].Value)
+	}
+	return sb.String()
 }
 
 // Get returns the value paired with key from the corresponding TraceState
@@ -179,15 +288,25 @@ func (ts TraceState) Insert(key, value string) (TraceState, error) {
 	if err != nil {
 		return ts, err
 	}
-
-	cTS := ts.Delete(key)
-	if cTS.Len()+1 <= maxListMembers {
-		cTS.list = append(cTS.list, member{})
+	n := len(ts.list)
+	found := n
+	for i := range ts.list {
+		if ts.list[i].Key == key {
+			found = i
+		}
+	}
+	cTS := TraceState{}
+	if found == n && n < maxListMembers {
+		cTS.list = make([]member, n+1)
+	} else {
+		cTS.list = make([]member, n)
 	}
-	// When the number of members exceeds capacity, drop the "right-most".
-	copy(cTS.list[1:], cTS.list)
 	cTS.list[0] = m
-
+	// When the number of members exceeds capacity, drop the "right-most".
+	copy(cTS.list[1:], ts.list[0:found])
+	if found < n {
+		copy(cTS.list[1+found:], ts.list[found+1:])
+	}
 	return cTS, nil
 }
 
diff --git a/trace/tracestate_benchmark_test.go b/trace/tracestate_benchmark_test.go
new file mode 100644
index 00000000000..171e09f00f8
--- /dev/null
+++ b/trace/tracestate_benchmark_test.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+	"testing"
+)
+
+func BenchmarkTraceStateParse(b *testing.B) {
+	for _, test := range testcases {
+		b.Run(test.name, func(b *testing.B) {
+			b.ResetTimer()
+			b.ReportAllocs()
+			for i := 0; i < b.N; i++ {
+				_, _ = ParseTraceState(test.in)
+			}
+		})
+	}
+}
+
+func BenchmarkTraceStateString(b *testing.B) {
+	for _, test := range testcases {
+		if len(test.tracestate.list) == 0 {
+			continue
+		}
+		b.Run(test.name, func(b *testing.B) {
+			b.ResetTimer()
+			b.ReportAllocs()
+			for i := 0; i < b.N; i++ {
+				_ = test.tracestate.String()
+			}
+		})
+	}
+}
+
+func BenchmarkTraceStateInsert(b *testing.B) {
+	for _, test := range insertTestcase {
+		b.Run(test.name, func(b *testing.B) {
+			b.ResetTimer()
+			b.ReportAllocs()
+			for i := 0; i < b.N; i++ {
+				_, _ = test.tracestate.Insert(test.key, test.value)
+			}
+		})
+	}
+}
diff --git a/trace/tracestate_test.go b/trace/tracestate_test.go
index 784c1589219..4cd0fbc45f3 100644
--- a/trace/tracestate_test.go
+++ b/trace/tracestate_test.go
@@ -420,84 +420,85 @@ func TestTraceStateDelete(t *testing.T) {
 	}
 }
 
-func TestTraceStateInsert(t *testing.T) {
-	ts := TraceState{list: []member{
-		{Key: "key1", Value: "val1"},
-		{Key: "key2", Value: "val2"},
-		{Key: "key3", Value: "val3"},
-	}}
+var insertTS = TraceState{list: []member{
+	{Key: "key1", Value: "val1"},
+	{Key: "key2", Value: "val2"},
+	{Key: "key3", Value: "val3"},
+}}
 
-	testCases := []struct {
-		name       string
-		tracestate TraceState
-		key, value string
-		expected   TraceState
-		err        error
-	}{
-		{
-			name:       "add new",
-			tracestate: ts,
-			key:        "key4@vendor",
-			value:      "val4",
-			expected: TraceState{list: []member{
-				{Key: "key4@vendor", Value: "val4"},
-				{Key: "key1", Value: "val1"},
-				{Key: "key2", Value: "val2"},
-				{Key: "key3", Value: "val3"},
-			}},
-		},
-		{
-			name:       "replace",
-			tracestate: ts,
-			key:        "key2",
-			value:      "valX",
-			expected: TraceState{list: []member{
-				{Key: "key2", Value: "valX"},
-				{Key: "key1", Value: "val1"},
-				{Key: "key3", Value: "val3"},
-			}},
-		},
-		{
-			name:       "invalid key",
-			tracestate: ts,
-			key:        "key!",
-			value:      "val",
-			expected:   ts,
-			err:        errInvalidKey,
-		},
-		{
-			name:       "invalid value",
-			tracestate: ts,
-			key:        "key",
-			value:      "v=l",
-			expected:   ts,
-			err:        errInvalidValue,
-		},
-		{
-			name:       "invalid key/value",
-			tracestate: ts,
-			key:        "key!",
-			value:      "v=l",
-			expected:   ts,
-			err:        errInvalidKey,
-		},
-		{
-			name:       "drop the right-most member(oldest) in queue",
-			tracestate: maxMembers,
-			key:        "keyx",
-			value:      "valx",
-			expected: func() TraceState {
-				// Prepend the new element and remove the oldest one, which is over capacity.
-				return TraceState{
-					list: append(
-						[]member{{Key: "keyx", Value: "valx"}},
-						maxMembers.list[:len(maxMembers.list)-1]...,
-					),
-				}
-			}(),
-		}}
+var insertTestcase = []struct {
+	name       string
+	tracestate TraceState
+	key, value string
+	expected   TraceState
+	err        error
+}{
+	{
+		name:       "add new",
+		tracestate: insertTS,
+		key:        "key4@vendor",
+		value:      "val4",
+		expected: TraceState{list: []member{
+			{Key: "key4@vendor", Value: "val4"},
+			{Key: "key1", Value: "val1"},
+			{Key: "key2", Value: "val2"},
+			{Key: "key3", Value: "val3"},
+		}},
+	},
+	{
+		name:       "replace",
+		tracestate: insertTS,
+		key:        "key2",
+		value:      "valX",
+		expected: TraceState{list: []member{
+			{Key: "key2", Value: "valX"},
+			{Key: "key1", Value: "val1"},
+			{Key: "key3", Value: "val3"},
+		}},
+	},
+	{
+		name:       "invalid key",
+		tracestate: insertTS,
+		key:        "key!",
+		value:      "val",
+		expected:   insertTS,
+		err:        errInvalidKey,
+	},
+	{
+		name:       "invalid value",
+		tracestate: insertTS,
+		key:        "key",
+		value:      "v=l",
+		expected:   insertTS,
+		err:        errInvalidValue,
+	},
+	{
+		name:       "invalid key/value",
+		tracestate: insertTS,
+		key:        "key!",
+		value:      "v=l",
+		expected:   insertTS,
+		err:        errInvalidKey,
+	},
+	{
+		name:       "drop the right-most member(oldest) in queue",
+		tracestate: maxMembers,
+		key:        "keyx",
+		value:      "valx",
+		expected: func() TraceState {
+			// Prepend the new element and remove the oldest one, which is over capacity.
+			return TraceState{
+				list: append(
+					[]member{{Key: "keyx", Value: "valx"}},
+					maxMembers.list[:len(maxMembers.list)-1]...,
+				),
+			}
+		}(),
+	},
+}
 
-	for _, tc := range testCases {
+func TestTraceStateInsert(t *testing.T) {
+	for _, tc := range insertTestcase {
 		t.Run(tc.name, func(t *testing.T) {
 			actual, err := tc.tracestate.Insert(tc.key, tc.value)
 			assert.ErrorIs(t, err, tc.err, tc.name)
@@ -551,3 +552,37 @@ func TestTraceStateImmutable(t *testing.T) {
 	assert.Equal(t, v0, ts2.Get(k0))
 	assert.Equal(t, "", ts3.Get(k0))
 }
+
+func BenchmarkParseTraceState(b *testing.B) {
+	benches := []struct {
+		name string
+		in   string
+	}{
+		{
+			name: "single key",
+			in:   "somewhatRealisticKeyLength=someValueAbcdefgh1234567890",
+		},
+		{
+			name: "tenant single key",
+			in:   "somewhatRealisticKeyLength@someTenant=someValueAbcdefgh1234567890",
+		},
+		{
+			name: "three keys",
+			in:   "someKeyName.One=someValue1,someKeyName.Two=someValue2,someKeyName.Three=someValue3",
+		},
+		{
+			name: "tenant three keys",
+			in:   "someKeyName.One@tenant=someValue1,someKeyName.Two@tenant=someValue2,someKeyName.Three@tenant=someValue3",
+		},
+	}
+	for _, bench := range benches {
+		b.Run(bench.name, func(b *testing.B) {
+			b.ReportAllocs()
+			b.ResetTimer()
+
+			for i := 0; i < b.N; i++ {
+				_, _ = ParseTraceState(bench.in)
+			}
+		})
+	}
+}
diff --git a/trace_test.go b/trace_test.go
index 48d245b4116..21829767fd0 100644
--- a/trace_test.go
+++ b/trace_test.go
@@ -20,19 +20,21 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+	"go.opentelemetry.io/otel/trace/noop"
 )
 
-type testTracerProvider struct{}
+type testTracerProvider struct{ embedded.TracerProvider }
 
 var _ trace.TracerProvider = &testTracerProvider{}
 
 func (*testTracerProvider) Tracer(_ string, _ ...trace.TracerOption) trace.Tracer {
-	return trace.NewNoopTracerProvider().Tracer("")
+	return noop.NewTracerProvider().Tracer("")
 }
 
 func TestMultipleGlobalTracerProvider(t *testing.T) {
 	p1 := testTracerProvider{}
-	p2 := trace.NewNoopTracerProvider()
+	p2 := noop.NewTracerProvider()
 	SetTracerProvider(&p1)
 	SetTracerProvider(p2)
 
diff --git a/version.go b/version.go
index c2217a28d68..e2f743585d1 100644
--- a/version.go
+++ b/version.go
@@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel"
 
 // Version is the current release version of OpenTelemetry in use.
 func Version() string {
-	return "1.16.0"
+	return "1.21.0"
 }
diff --git a/versions.yaml b/versions.yaml
index 9dc47532bc2..3c153c9d6fc 100644
--- a/versions.yaml
+++ b/versions.yaml
@@ -14,19 +14,16 @@
 
 module-sets:
   stable-v1:
-    version: v1.16.0
+    version: v1.21.0
     modules:
       - go.opentelemetry.io/otel
       - go.opentelemetry.io/otel/bridge/opentracing
       - go.opentelemetry.io/otel/bridge/opentracing/test
-      - go.opentelemetry.io/otel/example/fib
-      - go.opentelemetry.io/otel/example/jaeger
+      - go.opentelemetry.io/otel/example/dice
       - go.opentelemetry.io/otel/example/namedtracer
       - go.opentelemetry.io/otel/example/otel-collector
       - go.opentelemetry.io/otel/example/passthrough
       - go.opentelemetry.io/otel/example/zipkin
-      - go.opentelemetry.io/otel/exporters/jaeger
-      - go.opentelemetry.io/otel/exporters/otlp/internal/retry
       - go.opentelemetry.io/otel/exporters/otlp/otlptrace
       - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
       - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
@@ -34,23 +31,21 @@ module-sets:
       - go.opentelemetry.io/otel/exporters/zipkin
       - go.opentelemetry.io/otel/metric
       - go.opentelemetry.io/otel/sdk
+      - go.opentelemetry.io/otel/sdk/metric
       - go.opentelemetry.io/otel/trace
   experimental-metrics:
-    version: v0.39.0
+    version: v0.44.0
     modules:
+      - go.opentelemetry.io/otel/bridge/opencensus
+      - go.opentelemetry.io/otel/bridge/opencensus/test
       - go.opentelemetry.io/otel/example/opencensus
       - go.opentelemetry.io/otel/example/prometheus
-      - go.opentelemetry.io/otel/exporters/otlp/otlpmetric
       - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
       - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
       - go.opentelemetry.io/otel/exporters/prometheus
       - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
-      - go.opentelemetry.io/otel/sdk/metric
-      - go.opentelemetry.io/otel/bridge/opencensus
-      - go.opentelemetry.io/otel/bridge/opencensus/test
-      - go.opentelemetry.io/otel/example/view
   experimental-schema:
-    version: v0.0.4
+    version: v0.0.7
     modules:
       - go.opentelemetry.io/otel/schema
 excluded-modules: