+ pr := ParsedResult{
+ ResourceName: subExpressions[1],
+ ResultName: subExpressions[3],
+ ResultType: "object",
+ ObjectKey: subExpressions[4],
+ }
+ return pr, nil
+ }
+ }
+ return ParsedResult{}, fmt.Errorf("must be one of the form 1). %q; 2). %q; 3). %q; 4). %q", resultExpressionFormat, objectResultExpressionFormat, stepResultExpressionFormat, objectStepResultExpressionFormat)
+}
+
+// ParseTaskExpression parses the input string and checks whether it references a task result.
+func ParseTaskExpression(substitutionExpression string) (ParsedResult, error) {
+ if LooksLikeResultRef(substitutionExpression) {
+ return parseExpression(substitutionExpression)
+ }
+ return ParsedResult{}, fmt.Errorf("must be one of the form 1). %q; 2). %q", resultExpressionFormat, objectResultExpressionFormat)
+}
+
+// ParseStepExpression parses the input string and checks whether it references a step result.
+func ParseStepExpression(substitutionExpression string) (ParsedResult, error) {
+ if looksLikeStepResultRef(substitutionExpression) {
+ return parseExpression(substitutionExpression)
+ }
+ return ParsedResult{}, fmt.Errorf("must be one of the form 1). %q; 2). %q", stepResultExpressionFormat, objectStepResultExpressionFormat)
+}
+
+// ParseResultName parses the input string to extract the resultName and the result index.
+// Array indexing:
+// Input: anArrayResult[1]
+// Output: anArrayResult, "1"
+// Array star reference:
+// Input: anArrayResult[*]
+// Output: anArrayResult, "*"
+func ParseResultName(resultName string) (string, string) {
+ stringIdx := strings.TrimSuffix(strings.TrimPrefix(arrayIndexingRegex.FindString(resultName), "["), "]")
+ resultName = arrayIndexingRegex.ReplaceAllString(resultName, "")
+ return resultName, stringIdx
+}
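For context on how the helper above behaves, here is a minimal, self-contained sketch of the `ParseResultName` split. The `arrayIndexingRegex` pattern below is an assumption chosen to match the documented examples, not necessarily the package's actual constant.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Assumed pattern: a trailing "[<digits>]" or "[*]" suffix on a result name.
var arrayIndexingRegex = regexp.MustCompile(`\[([0-9]+|\*)\]`)

// parseResultName mirrors ParseResultName above:
//   "anArrayResult[1]" -> ("anArrayResult", "1")
//   "anArrayResult[*]" -> ("anArrayResult", "*")
func parseResultName(resultName string) (string, string) {
	stringIdx := strings.TrimSuffix(strings.TrimPrefix(arrayIndexingRegex.FindString(resultName), "["), "]")
	name := arrayIndexingRegex.ReplaceAllString(resultName, "")
	return name, stringIdx
}

func main() {
	name, idx := parseResultName("anArrayResult[1]")
	fmt.Println(name, idx) // anArrayResult 1

	name, idx = parseResultName("anArrayResult[*]")
	fmt.Println(name, idx) // anArrayResult *
}
```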
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/spire/config/config.go b/vendor/github.com/tektoncd/pipeline/pkg/spire/config/config.go
index f8fed32da..4f1a4a5f2 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/spire/config/config.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/spire/config/config.go
@@ -17,6 +17,7 @@ limitations under the License.
package config
import (
+ "errors"
"fmt"
"sort"
"strings"
@@ -62,7 +63,7 @@ func (c SpireConfig) Validate() error {
}
if !strings.HasPrefix(c.NodeAliasPrefix, "/") {
- return fmt.Errorf("Spire node alias should start with a /")
+ return errors.New("Spire node alias should start with a /")
}
return nil
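The `fmt.Errorf` → `errors.New` swap above is the usual cleanup when an error message contains no formatting verbs; linters commonly flag `fmt.Errorf` calls that take no arguments. A small sketch of the same rule of thumb (the validator and its second check are illustrative assumptions, not the package's API):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// validateNodeAliasPrefix is a hypothetical validator illustrating when to use each constructor.
func validateNodeAliasPrefix(prefix string) error {
	if !strings.HasPrefix(prefix, "/") {
		// Static message, no verbs: errors.New is the idiomatic choice.
		return errors.New("Spire node alias should start with a /")
	}
	if strings.Contains(prefix, " ") {
		// Dynamic values still call for fmt.Errorf (and %w when wrapping).
		return fmt.Errorf("Spire node alias %q must not contain spaces", prefix)
	}
	return nil
}

func main() {
	fmt.Println(validateNodeAliasPrefix("node-alias"))  // Spire node alias should start with a /
	fmt.Println(validateNodeAliasPrefix("/spire/node")) // <nil>
}
```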
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go
index 69b2c0d82..8e1acab2f 100644
--- a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go
+++ b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go
@@ -334,11 +334,25 @@ func TrimArrayIndex(s string) string {
return arrayIndexingRegex.ReplaceAllString(s, "")
}
-// ExtractParamsExpressions will find all `$(params.paramName[int])` expressions
-func ExtractParamsExpressions(s string) []string {
+// ExtractArrayIndexingParamsExpressions will find all `$(params.paramName[int])` expressions
+func ExtractArrayIndexingParamsExpressions(s string) []string {
return paramIndexingRegex.FindAllString(s, -1)
}
+func ExtractVariableExpressions(s, prefix string) ([]string, error) {
+ pattern := fmt.Sprintf(braceMatchingRegex, prefix, parameterSubstitution, parameterSubstitution, parameterSubstitution)
+ re, err := regexp.Compile(pattern)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse regex pattern: %w", err)
+ }
+
+ matches := re.FindAllString(s, -1)
+ if len(matches) == 0 {
+ return []string{}, nil
+ }
+ return matches, nil
+}
+
// ExtractIndexString will find the leftmost match of `[int]`
func ExtractIndexString(s string) string {
return intIndexRegex.FindString(s)
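`ExtractVariableExpressions` above builds its pattern from the package's `braceMatchingRegex` and `parameterSubstitution` constants and returns every match. A self-contained sketch of the same idea, using a deliberately simplified pattern (an assumption, not the package's real one):

```go
package main

import (
	"fmt"
	"regexp"
)

// extractVariableExpressions returns every "$(<prefix>.<name>)" expression in s.
// The pattern is a simplified stand-in for the vendored braceMatchingRegex.
func extractVariableExpressions(s, prefix string) ([]string, error) {
	pattern := fmt.Sprintf(`\$\(%s\.[a-zA-Z0-9_-]+\)`, regexp.QuoteMeta(prefix))
	re, err := regexp.Compile(pattern)
	if err != nil {
		return nil, fmt.Errorf("failed to parse regex pattern: %w", err)
	}
	// Unlike the vendored function, FindAllString returns nil (not an empty
	// slice) when nothing matches.
	return re.FindAllString(s, -1), nil
}

func main() {
	exprs, err := extractVariableExpressions("image=$(params.image) tag=$(params.tag)", "params")
	if err != nil {
		panic(err)
	}
	fmt.Println(exprs) // [$(params.image) $(params.tag)]
}
```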
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
index af006fc92..6a6b3e018 100644
--- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
@@ -56,6 +56,11 @@ func (tr *Reader) RawBytes() []byte {
}
+// ExpectedPadding returns the number of bytes of padding expected after the last header returned by Next()
+func (tr *Reader) ExpectedPadding() int64 {
+ return tr.pad
+}
+
// NewReader creates a new Reader reading from r.
func NewReader(r io.Reader) *Reader {
return &Reader{r: r, curr: &regFileReader{r, 0}}
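The new `ExpectedPadding` accessor exposes the reader's internal pad counter: the number of zero bytes that follow the current entry's data before the next 512-byte header block. A usage sketch, assuming the vendored import path and a hypothetical `example.tar` input:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/vbatts/tar-split/archive/tar"
)

func main() {
	f, err := os.Open("example.tar") // hypothetical input archive
	if err != nil {
		panic(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		// After Next(), ExpectedPadding reports how many padding bytes follow
		// this entry's data to reach the next 512-byte block boundary.
		fmt.Printf("%s: %d bytes of padding expected\n", hdr.Name, tr.ExpectedPadding())
	}
}
```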
diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml
deleted file mode 100644
index 571116cc3..000000000
--- a/vendor/go.uber.org/atomic/.codecov.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-coverage:
- range: 80..100
- round: down
- precision: 2
-
- status:
- project: # measuring the overall project coverage
- default: # context, you can create multiple ones with custom titles
- enabled: yes # must be yes|true to enable this status
- target: 100 # specify the target coverage for each commit status
- # option: "auto" (must increase from parent commit or pull request base)
- # option: "X%" a static target percentage to hit
- if_not_found: success # if parent is not found report status as success, error, or failure
- if_ci_failed: error # if ci fails report status as success, error, or failure
-
-# Also update COVER_IGNORE_PKGS in the Makefile.
-ignore:
- - /internal/gen-atomicint/
- - /internal/gen-valuewrapper/
diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore
deleted file mode 100644
index 2e337a0ed..000000000
--- a/vendor/go.uber.org/atomic/.gitignore
+++ /dev/null
@@ -1,15 +0,0 @@
-/bin
-.DS_Store
-/vendor
-cover.html
-cover.out
-lint.log
-
-# Binaries
-*.test
-
-# Profiling output
-*.prof
-
-# Output of fossa analyzer
-/fossa
diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md
deleted file mode 100644
index 6f87f33fa..000000000
--- a/vendor/go.uber.org/atomic/CHANGELOG.md
+++ /dev/null
@@ -1,127 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
-## [1.11.0] - 2023-05-02
-### Fixed
-- Fix initialization of `Value` wrappers.
-
-### Added
-- Add `String` method to `atomic.Pointer[T]` type allowing users to safely print
-underlying values of pointers.
-
-[1.11.0]: https://github.com/uber-go/atomic/compare/v1.10.0...v1.11.0
-
-## [1.10.0] - 2022-08-11
-### Added
-- Add `atomic.Float32` type for atomic operations on `float32`.
-- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`,
- and `atomic.Value`.
-- Add generic `atomic.Pointer[T]` type for atomic operations on pointers of any
- type. This is present only for Go 1.18 or higher, and is a drop-in for
- replacement for the standard library's `sync/atomic.Pointer` type.
-
-### Changed
-- Deprecate `CAS` methods on all types in favor of corresponding
- `CompareAndSwap` methods.
-
-Thanks to @eNV25 and @icpd for their contributions to this release.
-
-[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0
-
-## [1.9.0] - 2021-07-15
-### Added
-- Add `Float64.Swap` to match int atomic operations.
-- Add `atomic.Time` type for atomic operations on `time.Time` values.
-
-[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0
-
-## [1.8.0] - 2021-06-09
-### Added
-- Add `atomic.Uintptr` type for atomic operations on `uintptr` values.
-- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values.
-
-[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0
-
-## [1.7.0] - 2020-09-14
-### Added
-- Support JSON serialization and deserialization of primitive atomic types.
-- Support Text marshalling and unmarshalling for string atomics.
-
-### Changed
-- Disallow incorrect comparison of atomic values in a non-atomic way.
-
-### Removed
-- Remove dependency on `golang.org/x/{lint, tools}`.
-
-[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
-
-## [1.6.0] - 2020-02-24
-### Changed
-- Drop library dependency on `golang.org/x/{lint, tools}`.
-
-[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
-
-## [1.5.1] - 2019-11-19
-- Fix bug where `Bool.CAS` and `Bool.Toggle` do work correctly together
- causing `CAS` to fail even though the old value matches.
-
-[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
-
-## [1.5.0] - 2019-10-29
-### Changed
-- With Go modules, only the `go.uber.org/atomic` import path is supported now.
- If you need to use the old import path, please add a `replace` directive to
- your `go.mod`.
-
-[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
-
-## [1.4.0] - 2019-05-01
-### Added
- - Add `atomic.Error` type for atomic operations on `error` values.
-
-[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
-
-## [1.3.2] - 2018-05-02
-### Added
-- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
-
-[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
-
-## [1.3.1] - 2017-11-14
-### Fixed
-- Revert optimization for `atomic.String.Store("")` which caused data races.
-
-[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
-
-## [1.3.0] - 2017-11-13
-### Added
-- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
-
-### Changed
-- Optimize `atomic.String.Store("")` by avoiding an allocation.
-
-[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
-
-## [1.2.0] - 2017-04-12
-### Added
-- Shadow `atomic.Value` from `sync/atomic`.
-
-[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
-
-## [1.1.0] - 2017-03-10
-### Added
-- Add atomic `Float64` type.
-
-### Changed
-- Support new `go.uber.org/atomic` import path.
-
-[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
-
-## [1.0.0] - 2016-07-18
-
-- Initial release.
-
-[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0
diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt
deleted file mode 100644
index 8765c9fbc..000000000
--- a/vendor/go.uber.org/atomic/LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2016 Uber Technologies, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile
deleted file mode 100644
index 46c945b32..000000000
--- a/vendor/go.uber.org/atomic/Makefile
+++ /dev/null
@@ -1,79 +0,0 @@
-# Directory to place `go install`ed binaries into.
-export GOBIN ?= $(shell pwd)/bin
-
-GOLINT = $(GOBIN)/golint
-GEN_ATOMICINT = $(GOBIN)/gen-atomicint
-GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper
-STATICCHECK = $(GOBIN)/staticcheck
-
-GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print)
-
-# Also update ignore section in .codecov.yml.
-COVER_IGNORE_PKGS = \
- go.uber.org/atomic/internal/gen-atomicint \
- go.uber.org/atomic/internal/gen-atomicwrapper
-
-.PHONY: build
-build:
- go build ./...
-
-.PHONY: test
-test:
- go test -race ./...
-
-.PHONY: gofmt
-gofmt:
- $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
- gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
- @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false)
-
-$(GOLINT):
- cd tools && go install golang.org/x/lint/golint
-
-$(STATICCHECK):
- cd tools && go install honnef.co/go/tools/cmd/staticcheck
-
-$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*)
- go build -o $@ ./internal/gen-atomicwrapper
-
-$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*)
- go build -o $@ ./internal/gen-atomicint
-
-.PHONY: golint
-golint: $(GOLINT)
- $(GOLINT) ./...
-
-.PHONY: staticcheck
-staticcheck: $(STATICCHECK)
- $(STATICCHECK) ./...
-
-.PHONY: lint
-lint: gofmt golint staticcheck generatenodirty
-
-# comma separated list of packages to consider for code coverage.
-COVER_PKG = $(shell \
- go list -find ./... | \
- grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \
- paste -sd, -)
-
-.PHONY: cover
-cover:
- go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./...
- go tool cover -html=cover.out -o cover.html
-
-.PHONY: generate
-generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER)
- go generate ./...
-
-.PHONY: generatenodirty
-generatenodirty:
- @[ -z "$$(git status --porcelain)" ] || ( \
- echo "Working tree is dirty. Commit your changes first."; \
- git status; \
- exit 1 )
- @make generate
- @status=$$(git status --porcelain); \
- [ -z "$$status" ] || ( \
- echo "Working tree is dirty after `make generate`:"; \
- echo "$$status"; \
- echo "Please ensure that the generated code is up-to-date." )
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
deleted file mode 100644
index 96b47a1f1..000000000
--- a/vendor/go.uber.org/atomic/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
-
-Simple wrappers for primitive types to enforce atomic access.
-
-## Installation
-
-```shell
-$ go get -u go.uber.org/atomic@v1
-```
-
-### Legacy Import Path
-
-As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way
-of using this package. If you are using Go modules, this package will fail to
-compile with the legacy import path path `github.com/uber-go/atomic`.
-
-We recommend migrating your code to the new import path but if you're unable
-to do so, or if your dependencies are still using the old import path, you
-will have to add a `replace` directive to your `go.mod` file downgrading the
-legacy import path to an older version.
-
-```
-replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0
-```
-
-You can do so automatically by running the following command.
-
-```shell
-$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0
-```
-
-## Usage
-
-The standard library's `sync/atomic` is powerful, but it's easy to forget which
-variables must be accessed atomically. `go.uber.org/atomic` preserves all the
-functionality of the standard library, but wraps the primitive types to
-provide a safer, more convenient API.
-
-```go
-var atom atomic.Uint32
-atom.Store(42)
-atom.Sub(2)
-atom.CAS(40, 11)
-```
-
-See the [documentation][doc] for a complete API specification.
-
-## Development Status
-
-Stable.
-
----
-
-Released under the [MIT License](LICENSE.txt).
-
-[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
-[doc]: https://godoc.org/go.uber.org/atomic
-[ci-img]: https://github.com/uber-go/atomic/actions/workflows/go.yml/badge.svg
-[ci]: https://github.com/uber-go/atomic/actions/workflows/go.yml
-[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
-[cov]: https://codecov.io/gh/uber-go/atomic
-[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
-[reportcard]: https://goreportcard.com/report/go.uber.org/atomic
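The README removed above documents the wrapper API (`Store`, `Sub`, `CAS`, ...). For comparison only: since Go 1.19 the standard `sync/atomic` package provides typed wrappers (`atomic.Bool`, `atomic.Uint32`, `atomic.Pointer[T]`, ...) with `Load`/`Store`/`Swap`/`CompareAndSwap` and `Add`, though without `Sub`/`CAS`/`Inc` helpers. A minimal sketch of the stdlib counterpart to the README snippet:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var atom atomic.Uint32
	atom.Store(42)
	atom.Add(^uint32(2) + 1) // subtract 2 by adding its two's complement (no Sub helper)
	swapped := atom.CompareAndSwap(40, 11)
	fmt.Println(atom.Load(), swapped) // 11 true
}
```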
diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go
deleted file mode 100644
index f0a2ddd14..000000000
--- a/vendor/go.uber.org/atomic/bool.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2023 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
-)
-
-// Bool is an atomic type-safe wrapper for bool values.
-type Bool struct {
- _ nocmp // disallow non-atomic comparison
-
- v Uint32
-}
-
-var _zeroBool bool
-
-// NewBool creates a new Bool.
-func NewBool(val bool) *Bool {
- x := &Bool{}
- if val != _zeroBool {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped bool.
-func (x *Bool) Load() bool {
- return truthy(x.v.Load())
-}
-
-// Store atomically stores the passed bool.
-func (x *Bool) Store(val bool) {
- x.v.Store(boolToInt(val))
-}
-
-// CAS is an atomic compare-and-swap for bool values.
-//
-// Deprecated: Use CompareAndSwap.
-func (x *Bool) CAS(old, new bool) (swapped bool) {
- return x.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap for bool values.
-func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) {
- return x.v.CompareAndSwap(boolToInt(old), boolToInt(new))
-}
-
-// Swap atomically stores the given bool and returns the old
-// value.
-func (x *Bool) Swap(val bool) (old bool) {
- return truthy(x.v.Swap(boolToInt(val)))
-}
-
-// MarshalJSON encodes the wrapped bool into JSON.
-func (x *Bool) MarshalJSON() ([]byte, error) {
- return json.Marshal(x.Load())
-}
-
-// UnmarshalJSON decodes a bool from JSON.
-func (x *Bool) UnmarshalJSON(b []byte) error {
- var v bool
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- x.Store(v)
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go
deleted file mode 100644
index a2e60e987..000000000
--- a/vendor/go.uber.org/atomic/bool_ext.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "strconv"
-)
-
-//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go
-
-func truthy(n uint32) bool {
- return n == 1
-}
-
-func boolToInt(b bool) uint32 {
- if b {
- return 1
- }
- return 0
-}
-
-// Toggle atomically negates the Boolean and returns the previous value.
-func (b *Bool) Toggle() (old bool) {
- for {
- old := b.Load()
- if b.CAS(old, !old) {
- return old
- }
- }
-}
-
-// String encodes the wrapped value as a string.
-func (b *Bool) String() string {
- return strconv.FormatBool(b.Load())
-}
diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go
deleted file mode 100644
index ae7390ee6..000000000
--- a/vendor/go.uber.org/atomic/doc.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// Package atomic provides simple wrappers around numerics to enforce atomic
-// access.
-package atomic
diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go
deleted file mode 100644
index 7c23868fc..000000000
--- a/vendor/go.uber.org/atomic/duration.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2023 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "time"
-)
-
-// Duration is an atomic type-safe wrapper for time.Duration values.
-type Duration struct {
- _ nocmp // disallow non-atomic comparison
-
- v Int64
-}
-
-var _zeroDuration time.Duration
-
-// NewDuration creates a new Duration.
-func NewDuration(val time.Duration) *Duration {
- x := &Duration{}
- if val != _zeroDuration {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped time.Duration.
-func (x *Duration) Load() time.Duration {
- return time.Duration(x.v.Load())
-}
-
-// Store atomically stores the passed time.Duration.
-func (x *Duration) Store(val time.Duration) {
- x.v.Store(int64(val))
-}
-
-// CAS is an atomic compare-and-swap for time.Duration values.
-//
-// Deprecated: Use CompareAndSwap.
-func (x *Duration) CAS(old, new time.Duration) (swapped bool) {
- return x.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap for time.Duration values.
-func (x *Duration) CompareAndSwap(old, new time.Duration) (swapped bool) {
- return x.v.CompareAndSwap(int64(old), int64(new))
-}
-
-// Swap atomically stores the given time.Duration and returns the old
-// value.
-func (x *Duration) Swap(val time.Duration) (old time.Duration) {
- return time.Duration(x.v.Swap(int64(val)))
-}
-
-// MarshalJSON encodes the wrapped time.Duration into JSON.
-func (x *Duration) MarshalJSON() ([]byte, error) {
- return json.Marshal(x.Load())
-}
-
-// UnmarshalJSON decodes a time.Duration from JSON.
-func (x *Duration) UnmarshalJSON(b []byte) error {
- var v time.Duration
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- x.Store(v)
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go
deleted file mode 100644
index 4c18b0a9e..000000000
--- a/vendor/go.uber.org/atomic/duration_ext.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import "time"
-
-//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go
-
-// Add atomically adds to the wrapped time.Duration and returns the new value.
-func (d *Duration) Add(delta time.Duration) time.Duration {
- return time.Duration(d.v.Add(int64(delta)))
-}
-
-// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
-func (d *Duration) Sub(delta time.Duration) time.Duration {
- return time.Duration(d.v.Sub(int64(delta)))
-}
-
-// String encodes the wrapped value as a string.
-func (d *Duration) String() string {
- return d.Load().String()
-}
diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go
deleted file mode 100644
index b7e3f1291..000000000
--- a/vendor/go.uber.org/atomic/error.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2023 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-// Error is an atomic type-safe wrapper for error values.
-type Error struct {
- _ nocmp // disallow non-atomic comparison
-
- v Value
-}
-
-var _zeroError error
-
-// NewError creates a new Error.
-func NewError(val error) *Error {
- x := &Error{}
- if val != _zeroError {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped error.
-func (x *Error) Load() error {
- return unpackError(x.v.Load())
-}
-
-// Store atomically stores the passed error.
-func (x *Error) Store(val error) {
- x.v.Store(packError(val))
-}
-
-// CompareAndSwap is an atomic compare-and-swap for error values.
-func (x *Error) CompareAndSwap(old, new error) (swapped bool) {
- if x.v.CompareAndSwap(packError(old), packError(new)) {
- return true
- }
-
- if old == _zeroError {
- // If the old value is the empty value, then it's possible the
- // underlying Value hasn't been set and is nil, so retry with nil.
- return x.v.CompareAndSwap(nil, packError(new))
- }
-
- return false
-}
-
-// Swap atomically stores the given error and returns the old
-// value.
-func (x *Error) Swap(val error) (old error) {
- return unpackError(x.v.Swap(packError(val)))
-}
diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go
deleted file mode 100644
index d31fb633b..000000000
--- a/vendor/go.uber.org/atomic/error_ext.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-// atomic.Value panics on nil inputs, or if the underlying type changes.
-// Stabilize by always storing a custom struct that we control.
-
-//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -compareandswap -swap -file=error.go
-
-type packedError struct{ Value error }
-
-func packError(v error) interface{} {
- return packedError{v}
-}
-
-func unpackError(v interface{}) error {
- if err, ok := v.(packedError); ok {
- return err.Value
- }
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/float32.go b/vendor/go.uber.org/atomic/float32.go
deleted file mode 100644
index 62c36334f..000000000
--- a/vendor/go.uber.org/atomic/float32.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2023 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "math"
-)
-
-// Float32 is an atomic type-safe wrapper for float32 values.
-type Float32 struct {
- _ nocmp // disallow non-atomic comparison
-
- v Uint32
-}
-
-var _zeroFloat32 float32
-
-// NewFloat32 creates a new Float32.
-func NewFloat32(val float32) *Float32 {
- x := &Float32{}
- if val != _zeroFloat32 {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped float32.
-func (x *Float32) Load() float32 {
- return math.Float32frombits(x.v.Load())
-}
-
-// Store atomically stores the passed float32.
-func (x *Float32) Store(val float32) {
- x.v.Store(math.Float32bits(val))
-}
-
-// Swap atomically stores the given float32 and returns the old
-// value.
-func (x *Float32) Swap(val float32) (old float32) {
- return math.Float32frombits(x.v.Swap(math.Float32bits(val)))
-}
-
-// MarshalJSON encodes the wrapped float32 into JSON.
-func (x *Float32) MarshalJSON() ([]byte, error) {
- return json.Marshal(x.Load())
-}
-
-// UnmarshalJSON decodes a float32 from JSON.
-func (x *Float32) UnmarshalJSON(b []byte) error {
- var v float32
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- x.Store(v)
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/float32_ext.go b/vendor/go.uber.org/atomic/float32_ext.go
deleted file mode 100644
index b0cd8d9c8..000000000
--- a/vendor/go.uber.org/atomic/float32_ext.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "math"
- "strconv"
-)
-
-//go:generate bin/gen-atomicwrapper -name=Float32 -type=float32 -wrapped=Uint32 -pack=math.Float32bits -unpack=math.Float32frombits -swap -json -imports math -file=float32.go
-
-// Add atomically adds to the wrapped float32 and returns the new value.
-func (f *Float32) Add(delta float32) float32 {
- for {
- old := f.Load()
- new := old + delta
- if f.CAS(old, new) {
- return new
- }
- }
-}
-
-// Sub atomically subtracts from the wrapped float32 and returns the new value.
-func (f *Float32) Sub(delta float32) float32 {
- return f.Add(-delta)
-}
-
-// CAS is an atomic compare-and-swap for float32 values.
-//
-// Deprecated: Use CompareAndSwap
-func (f *Float32) CAS(old, new float32) (swapped bool) {
- return f.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap for float32 values.
-//
-// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
-// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN.
-// This avoids typical CompareAndSwap loops from blocking forever, e.g.,
-//
-// for {
-// old := atom.Load()
-// new = f(old)
-// if atom.CompareAndSwap(old, new) {
-// break
-// }
-// }
-//
-// If CompareAndSwap did not match NaN to match, then the above would loop forever.
-func (f *Float32) CompareAndSwap(old, new float32) (swapped bool) {
- return f.v.CompareAndSwap(math.Float32bits(old), math.Float32bits(new))
-}
-
-// String encodes the wrapped value as a string.
-func (f *Float32) String() string {
- // 'g' is the behavior for floats with %v.
- return strconv.FormatFloat(float64(f.Load()), 'g', -1, 32)
-}
diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go
deleted file mode 100644
index 5bc11caab..000000000
--- a/vendor/go.uber.org/atomic/float64.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2023 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "math"
-)
-
-// Float64 is an atomic type-safe wrapper for float64 values.
-type Float64 struct {
- _ nocmp // disallow non-atomic comparison
-
- v Uint64
-}
-
-var _zeroFloat64 float64
-
-// NewFloat64 creates a new Float64.
-func NewFloat64(val float64) *Float64 {
- x := &Float64{}
- if val != _zeroFloat64 {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped float64.
-func (x *Float64) Load() float64 {
- return math.Float64frombits(x.v.Load())
-}
-
-// Store atomically stores the passed float64.
-func (x *Float64) Store(val float64) {
- x.v.Store(math.Float64bits(val))
-}
-
-// Swap atomically stores the given float64 and returns the old
-// value.
-func (x *Float64) Swap(val float64) (old float64) {
- return math.Float64frombits(x.v.Swap(math.Float64bits(val)))
-}
-
-// MarshalJSON encodes the wrapped float64 into JSON.
-func (x *Float64) MarshalJSON() ([]byte, error) {
- return json.Marshal(x.Load())
-}
-
-// UnmarshalJSON decodes a float64 from JSON.
-func (x *Float64) UnmarshalJSON(b []byte) error {
- var v float64
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- x.Store(v)
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go
deleted file mode 100644
index 48c52b0ab..000000000
--- a/vendor/go.uber.org/atomic/float64_ext.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2020-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "math"
- "strconv"
-)
-
-//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go
-
-// Add atomically adds to the wrapped float64 and returns the new value.
-func (f *Float64) Add(delta float64) float64 {
- for {
- old := f.Load()
- new := old + delta
- if f.CAS(old, new) {
- return new
- }
- }
-}
-
-// Sub atomically subtracts from the wrapped float64 and returns the new value.
-func (f *Float64) Sub(delta float64) float64 {
- return f.Add(-delta)
-}
-
-// CAS is an atomic compare-and-swap for float64 values.
-//
-// Deprecated: Use CompareAndSwap
-func (f *Float64) CAS(old, new float64) (swapped bool) {
- return f.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap for float64 values.
-//
-// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
-// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN.
-// This avoids typical CompareAndSwap loops from blocking forever, e.g.,
-//
-// for {
-// old := atom.Load()
-// new = f(old)
-// if atom.CompareAndSwap(old, new) {
-// break
-// }
-// }
-//
-// If CompareAndSwap did not match NaN to match, then the above would loop forever.
-func (f *Float64) CompareAndSwap(old, new float64) (swapped bool) {
- return f.v.CompareAndSwap(math.Float64bits(old), math.Float64bits(new))
-}
-
-// String encodes the wrapped value as a string.
-func (f *Float64) String() string {
- // 'g' is the behavior for floats with %v.
- return strconv.FormatFloat(f.Load(), 'g', -1, 64)
-}
diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go
deleted file mode 100644
index 1e9ef4f87..000000000
--- a/vendor/go.uber.org/atomic/gen.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go
-//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go
-//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go
-//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go
-//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go
diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go
deleted file mode 100644
index 5320eac10..000000000
--- a/vendor/go.uber.org/atomic/int32.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020-2023 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Int32 is an atomic wrapper around int32.
-type Int32 struct {
- _ nocmp // disallow non-atomic comparison
-
- v int32
-}
-
-// NewInt32 creates a new Int32.
-func NewInt32(val int32) *Int32 {
- return &Int32{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Int32) Load() int32 {
- return atomic.LoadInt32(&i.v)
-}
-
-// Add atomically adds to the wrapped int32 and returns the new value.
-func (i *Int32) Add(delta int32) int32 {
- return atomic.AddInt32(&i.v, delta)
-}
-
-// Sub atomically subtracts from the wrapped int32 and returns the new value.
-func (i *Int32) Sub(delta int32) int32 {
- return atomic.AddInt32(&i.v, -delta)
-}
-
-// Inc atomically increments the wrapped int32 and returns the new value.
-func (i *Int32) Inc() int32 {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped int32 and returns the new value.
-func (i *Int32) Dec() int32 {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-//
-// Deprecated: Use CompareAndSwap.
-func (i *Int32) CAS(old, new int32) (swapped bool) {
- return i.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (i *Int32) CompareAndSwap(old, new int32) (swapped bool) {
- return atomic.CompareAndSwapInt32(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Int32) Store(val int32) {
- atomic.StoreInt32(&i.v, val)
-}
-
-// Swap atomically swaps the wrapped int32 and returns the old value.
-func (i *Int32) Swap(val int32) (old int32) {
- return atomic.SwapInt32(&i.v, val)
-}
-
-// MarshalJSON encodes the wrapped int32 into JSON.
-func (i *Int32) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped int32.
-func (i *Int32) UnmarshalJSON(b []byte) error {
- var v int32
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Int32) String() string {
- v := i.Load()
- return strconv.FormatInt(int64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go
deleted file mode 100644
index 460821d00..000000000
--- a/vendor/go.uber.org/atomic/int64.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020-2023 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Int64 is an atomic wrapper around int64.
-type Int64 struct {
- _ nocmp // disallow non-atomic comparison
-
- v int64
-}
-
-// NewInt64 creates a new Int64.
-func NewInt64(val int64) *Int64 {
- return &Int64{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Int64) Load() int64 {
- return atomic.LoadInt64(&i.v)
-}
-
-// Add atomically adds to the wrapped int64 and returns the new value.
-func (i *Int64) Add(delta int64) int64 {
- return atomic.AddInt64(&i.v, delta)
-}
-
-// Sub atomically subtracts from the wrapped int64 and returns the new value.
-func (i *Int64) Sub(delta int64) int64 {
- return atomic.AddInt64(&i.v, -delta)
-}
-
-// Inc atomically increments the wrapped int64 and returns the new value.
-func (i *Int64) Inc() int64 {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped int64 and returns the new value.
-func (i *Int64) Dec() int64 {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-//
-// Deprecated: Use CompareAndSwap.
-func (i *Int64) CAS(old, new int64) (swapped bool) {
- return i.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (i *Int64) CompareAndSwap(old, new int64) (swapped bool) {
- return atomic.CompareAndSwapInt64(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Int64) Store(val int64) {
- atomic.StoreInt64(&i.v, val)
-}
-
-// Swap atomically swaps the wrapped int64 and returns the old value.
-func (i *Int64) Swap(val int64) (old int64) {
- return atomic.SwapInt64(&i.v, val)
-}
-
-// MarshalJSON encodes the wrapped int64 into JSON.
-func (i *Int64) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped int64.
-func (i *Int64) UnmarshalJSON(b []byte) error {
- var v int64
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Int64) String() string {
- v := i.Load()
- return strconv.FormatInt(int64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go
deleted file mode 100644
index 54b74174a..000000000
--- a/vendor/go.uber.org/atomic/nocmp.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-// nocmp is an uncomparable struct. Embed this inside another struct to make
-// it uncomparable.
-//
-// type Foo struct {
-// nocmp
-// // ...
-// }
-//
-// This DOES NOT:
-//
-// - Disallow shallow copies of structs
-// - Disallow comparison of pointers to uncomparable structs
-type nocmp [0]func()
diff --git a/vendor/go.uber.org/atomic/pointer_go118.go b/vendor/go.uber.org/atomic/pointer_go118.go
deleted file mode 100644
index 1fb6c03b2..000000000
--- a/vendor/go.uber.org/atomic/pointer_go118.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) 2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-//go:build go1.18
-// +build go1.18
-
-package atomic
-
-import "fmt"
-
-// String returns a human readable representation of a Pointer's underlying value.
-func (p *Pointer[T]) String() string {
- return fmt.Sprint(p.Load())
-}
diff --git a/vendor/go.uber.org/atomic/pointer_go118_pre119.go b/vendor/go.uber.org/atomic/pointer_go118_pre119.go
deleted file mode 100644
index e0f47dba4..000000000
--- a/vendor/go.uber.org/atomic/pointer_go118_pre119.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-//go:build go1.18 && !go1.19
-// +build go1.18,!go1.19
-
-package atomic
-
-import "unsafe"
-
-type Pointer[T any] struct {
- _ nocmp // disallow non-atomic comparison
- p UnsafePointer
-}
-
-// NewPointer creates a new Pointer.
-func NewPointer[T any](v *T) *Pointer[T] {
- var p Pointer[T]
- if v != nil {
- p.p.Store(unsafe.Pointer(v))
- }
- return &p
-}
-
-// Load atomically loads the wrapped value.
-func (p *Pointer[T]) Load() *T {
- return (*T)(p.p.Load())
-}
-
-// Store atomically stores the passed value.
-func (p *Pointer[T]) Store(val *T) {
- p.p.Store(unsafe.Pointer(val))
-}
-
-// Swap atomically swaps the wrapped pointer and returns the old value.
-func (p *Pointer[T]) Swap(val *T) (old *T) {
- return (*T)(p.p.Swap(unsafe.Pointer(val)))
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
- return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new))
-}
diff --git a/vendor/go.uber.org/atomic/pointer_go119.go b/vendor/go.uber.org/atomic/pointer_go119.go
deleted file mode 100644
index 6726f17ad..000000000
--- a/vendor/go.uber.org/atomic/pointer_go119.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-//go:build go1.19
-// +build go1.19
-
-package atomic
-
-import "sync/atomic"
-
-// Pointer is an atomic pointer of type *T.
-type Pointer[T any] struct {
- _ nocmp // disallow non-atomic comparison
- p atomic.Pointer[T]
-}
-
-// NewPointer creates a new Pointer.
-func NewPointer[T any](v *T) *Pointer[T] {
- var p Pointer[T]
- if v != nil {
- p.p.Store(v)
- }
- return &p
-}
-
-// Load atomically loads the wrapped value.
-func (p *Pointer[T]) Load() *T {
- return p.p.Load()
-}
-
-// Store atomically stores the passed value.
-func (p *Pointer[T]) Store(val *T) {
- p.p.Store(val)
-}
-
-// Swap atomically swaps the wrapped pointer and returns the old value.
-func (p *Pointer[T]) Swap(val *T) (old *T) {
- return p.p.Swap(val)
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
- return p.p.CompareAndSwap(old, new)
-}
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
deleted file mode 100644
index 061466c5b..000000000
--- a/vendor/go.uber.org/atomic/string.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2023 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-// String is an atomic type-safe wrapper for string values.
-type String struct {
- _ nocmp // disallow non-atomic comparison
-
- v Value
-}
-
-var _zeroString string
-
-// NewString creates a new String.
-func NewString(val string) *String {
- x := &String{}
- if val != _zeroString {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped string.
-func (x *String) Load() string {
- return unpackString(x.v.Load())
-}
-
-// Store atomically stores the passed string.
-func (x *String) Store(val string) {
- x.v.Store(packString(val))
-}
-
-// CompareAndSwap is an atomic compare-and-swap for string values.
-func (x *String) CompareAndSwap(old, new string) (swapped bool) {
- if x.v.CompareAndSwap(packString(old), packString(new)) {
- return true
- }
-
- if old == _zeroString {
- // If the old value is the empty value, then it's possible the
- // underlying Value hasn't been set and is nil, so retry with nil.
- return x.v.CompareAndSwap(nil, packString(new))
- }
-
- return false
-}
-
-// Swap atomically stores the given string and returns the old
-// value.
-func (x *String) Swap(val string) (old string) {
- return unpackString(x.v.Swap(packString(val)))
-}
diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go
deleted file mode 100644
index 019109c86..000000000
--- a/vendor/go.uber.org/atomic/string_ext.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (c) 2020-2023 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped Value -pack packString -unpack unpackString -compareandswap -swap -file=string.go
-
-func packString(s string) interface{} {
- return s
-}
-
-func unpackString(v interface{}) string {
- if s, ok := v.(string); ok {
- return s
- }
- return ""
-}
-
-// String returns the wrapped value.
-func (s *String) String() string {
- return s.Load()
-}
-
-// MarshalText encodes the wrapped string into a textual form.
-//
-// This makes it encodable as JSON, YAML, XML, and more.
-func (s *String) MarshalText() ([]byte, error) {
- return []byte(s.Load()), nil
-}
-
-// UnmarshalText decodes text and replaces the wrapped string with it.
-//
-// This makes it decodable from JSON, YAML, XML, and more.
-func (s *String) UnmarshalText(b []byte) error {
- s.Store(string(b))
- return nil
-}
diff --git a/vendor/go.uber.org/atomic/time.go b/vendor/go.uber.org/atomic/time.go
deleted file mode 100644
index cc2a230c0..000000000
--- a/vendor/go.uber.org/atomic/time.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// @generated Code generated by gen-atomicwrapper.
-
-// Copyright (c) 2020-2023 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "time"
-)
-
-// Time is an atomic type-safe wrapper for time.Time values.
-type Time struct {
- _ nocmp // disallow non-atomic comparison
-
- v Value
-}
-
-var _zeroTime time.Time
-
-// NewTime creates a new Time.
-func NewTime(val time.Time) *Time {
- x := &Time{}
- if val != _zeroTime {
- x.Store(val)
- }
- return x
-}
-
-// Load atomically loads the wrapped time.Time.
-func (x *Time) Load() time.Time {
- return unpackTime(x.v.Load())
-}
-
-// Store atomically stores the passed time.Time.
-func (x *Time) Store(val time.Time) {
- x.v.Store(packTime(val))
-}
diff --git a/vendor/go.uber.org/atomic/time_ext.go b/vendor/go.uber.org/atomic/time_ext.go
deleted file mode 100644
index 1e3dc978a..000000000
--- a/vendor/go.uber.org/atomic/time_ext.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (c) 2021 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import "time"
-
-//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go
-
-func packTime(t time.Time) interface{} {
- return t
-}
-
-func unpackTime(v interface{}) time.Time {
- if t, ok := v.(time.Time); ok {
- return t
- }
- return time.Time{}
-}
diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go
deleted file mode 100644
index 4adc294ac..000000000
--- a/vendor/go.uber.org/atomic/uint32.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020-2023 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Uint32 is an atomic wrapper around uint32.
-type Uint32 struct {
- _ nocmp // disallow non-atomic comparison
-
- v uint32
-}
-
-// NewUint32 creates a new Uint32.
-func NewUint32(val uint32) *Uint32 {
- return &Uint32{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Uint32) Load() uint32 {
- return atomic.LoadUint32(&i.v)
-}
-
-// Add atomically adds to the wrapped uint32 and returns the new value.
-func (i *Uint32) Add(delta uint32) uint32 {
- return atomic.AddUint32(&i.v, delta)
-}
-
-// Sub atomically subtracts from the wrapped uint32 and returns the new value.
-func (i *Uint32) Sub(delta uint32) uint32 {
- return atomic.AddUint32(&i.v, ^(delta - 1))
-}
-
-// Inc atomically increments the wrapped uint32 and returns the new value.
-func (i *Uint32) Inc() uint32 {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped uint32 and returns the new value.
-func (i *Uint32) Dec() uint32 {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-//
-// Deprecated: Use CompareAndSwap.
-func (i *Uint32) CAS(old, new uint32) (swapped bool) {
- return i.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (i *Uint32) CompareAndSwap(old, new uint32) (swapped bool) {
- return atomic.CompareAndSwapUint32(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Uint32) Store(val uint32) {
- atomic.StoreUint32(&i.v, val)
-}
-
-// Swap atomically swaps the wrapped uint32 and returns the old value.
-func (i *Uint32) Swap(val uint32) (old uint32) {
- return atomic.SwapUint32(&i.v, val)
-}
-
-// MarshalJSON encodes the wrapped uint32 into JSON.
-func (i *Uint32) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped uint32.
-func (i *Uint32) UnmarshalJSON(b []byte) error {
- var v uint32
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Uint32) String() string {
- v := i.Load()
- return strconv.FormatUint(uint64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go
deleted file mode 100644
index 0e2eddb30..000000000
--- a/vendor/go.uber.org/atomic/uint64.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020-2023 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Uint64 is an atomic wrapper around uint64.
-type Uint64 struct {
- _ nocmp // disallow non-atomic comparison
-
- v uint64
-}
-
-// NewUint64 creates a new Uint64.
-func NewUint64(val uint64) *Uint64 {
- return &Uint64{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Uint64) Load() uint64 {
- return atomic.LoadUint64(&i.v)
-}
-
-// Add atomically adds to the wrapped uint64 and returns the new value.
-func (i *Uint64) Add(delta uint64) uint64 {
- return atomic.AddUint64(&i.v, delta)
-}
-
-// Sub atomically subtracts from the wrapped uint64 and returns the new value.
-func (i *Uint64) Sub(delta uint64) uint64 {
- return atomic.AddUint64(&i.v, ^(delta - 1))
-}
-
-// Inc atomically increments the wrapped uint64 and returns the new value.
-func (i *Uint64) Inc() uint64 {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped uint64 and returns the new value.
-func (i *Uint64) Dec() uint64 {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-//
-// Deprecated: Use CompareAndSwap.
-func (i *Uint64) CAS(old, new uint64) (swapped bool) {
- return i.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (i *Uint64) CompareAndSwap(old, new uint64) (swapped bool) {
- return atomic.CompareAndSwapUint64(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Uint64) Store(val uint64) {
- atomic.StoreUint64(&i.v, val)
-}
-
-// Swap atomically swaps the wrapped uint64 and returns the old value.
-func (i *Uint64) Swap(val uint64) (old uint64) {
- return atomic.SwapUint64(&i.v, val)
-}
-
-// MarshalJSON encodes the wrapped uint64 into JSON.
-func (i *Uint64) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped uint64.
-func (i *Uint64) UnmarshalJSON(b []byte) error {
- var v uint64
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Uint64) String() string {
- v := i.Load()
- return strconv.FormatUint(uint64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/uintptr.go b/vendor/go.uber.org/atomic/uintptr.go
deleted file mode 100644
index 7d5b000d6..000000000
--- a/vendor/go.uber.org/atomic/uintptr.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// @generated Code generated by gen-atomicint.
-
-// Copyright (c) 2020-2023 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "encoding/json"
- "strconv"
- "sync/atomic"
-)
-
-// Uintptr is an atomic wrapper around uintptr.
-type Uintptr struct {
- _ nocmp // disallow non-atomic comparison
-
- v uintptr
-}
-
-// NewUintptr creates a new Uintptr.
-func NewUintptr(val uintptr) *Uintptr {
- return &Uintptr{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (i *Uintptr) Load() uintptr {
- return atomic.LoadUintptr(&i.v)
-}
-
-// Add atomically adds to the wrapped uintptr and returns the new value.
-func (i *Uintptr) Add(delta uintptr) uintptr {
- return atomic.AddUintptr(&i.v, delta)
-}
-
-// Sub atomically subtracts from the wrapped uintptr and returns the new value.
-func (i *Uintptr) Sub(delta uintptr) uintptr {
- return atomic.AddUintptr(&i.v, ^(delta - 1))
-}
-
-// Inc atomically increments the wrapped uintptr and returns the new value.
-func (i *Uintptr) Inc() uintptr {
- return i.Add(1)
-}
-
-// Dec atomically decrements the wrapped uintptr and returns the new value.
-func (i *Uintptr) Dec() uintptr {
- return i.Sub(1)
-}
-
-// CAS is an atomic compare-and-swap.
-//
-// Deprecated: Use CompareAndSwap.
-func (i *Uintptr) CAS(old, new uintptr) (swapped bool) {
- return i.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (i *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) {
- return atomic.CompareAndSwapUintptr(&i.v, old, new)
-}
-
-// Store atomically stores the passed value.
-func (i *Uintptr) Store(val uintptr) {
- atomic.StoreUintptr(&i.v, val)
-}
-
-// Swap atomically swaps the wrapped uintptr and returns the old value.
-func (i *Uintptr) Swap(val uintptr) (old uintptr) {
- return atomic.SwapUintptr(&i.v, val)
-}
-
-// MarshalJSON encodes the wrapped uintptr into JSON.
-func (i *Uintptr) MarshalJSON() ([]byte, error) {
- return json.Marshal(i.Load())
-}
-
-// UnmarshalJSON decodes JSON into the wrapped uintptr.
-func (i *Uintptr) UnmarshalJSON(b []byte) error {
- var v uintptr
- if err := json.Unmarshal(b, &v); err != nil {
- return err
- }
- i.Store(v)
- return nil
-}
-
-// String encodes the wrapped value as a string.
-func (i *Uintptr) String() string {
- v := i.Load()
- return strconv.FormatUint(uint64(v), 10)
-}
diff --git a/vendor/go.uber.org/atomic/unsafe_pointer.go b/vendor/go.uber.org/atomic/unsafe_pointer.go
deleted file mode 100644
index 34868baf6..000000000
--- a/vendor/go.uber.org/atomic/unsafe_pointer.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2021-2022 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import (
- "sync/atomic"
- "unsafe"
-)
-
-// UnsafePointer is an atomic wrapper around unsafe.Pointer.
-type UnsafePointer struct {
- _ nocmp // disallow non-atomic comparison
-
- v unsafe.Pointer
-}
-
-// NewUnsafePointer creates a new UnsafePointer.
-func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer {
- return &UnsafePointer{v: val}
-}
-
-// Load atomically loads the wrapped value.
-func (p *UnsafePointer) Load() unsafe.Pointer {
- return atomic.LoadPointer(&p.v)
-}
-
-// Store atomically stores the passed value.
-func (p *UnsafePointer) Store(val unsafe.Pointer) {
- atomic.StorePointer(&p.v, val)
-}
-
-// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value.
-func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) {
- return atomic.SwapPointer(&p.v, val)
-}
-
-// CAS is an atomic compare-and-swap.
-//
-// Deprecated: Use CompareAndSwap
-func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) {
- return p.CompareAndSwap(old, new)
-}
-
-// CompareAndSwap is an atomic compare-and-swap.
-func (p *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) (swapped bool) {
- return atomic.CompareAndSwapPointer(&p.v, old, new)
-}
diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go
deleted file mode 100644
index 52caedb9a..000000000
--- a/vendor/go.uber.org/atomic/value.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package atomic
-
-import "sync/atomic"
-
-// Value shadows the type of the same name from sync/atomic
-// https://godoc.org/sync/atomic#Value
-type Value struct {
- _ nocmp // disallow non-atomic comparison
-
- atomic.Value
-}
diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go
new file mode 100644
index 000000000..2c033dff4
--- /dev/null
+++ b/vendor/golang.org/x/exp/constraints/constraints.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package constraints defines a set of useful constraints to be used
+// with type parameters.
+package constraints
+
+// Signed is a constraint that permits any signed integer type.
+// If future releases of Go add new predeclared signed integer types,
+// this constraint will be modified to include them.
+type Signed interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64
+}
+
+// Unsigned is a constraint that permits any unsigned integer type.
+// If future releases of Go add new predeclared unsigned integer types,
+// this constraint will be modified to include them.
+type Unsigned interface {
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+// Integer is a constraint that permits any integer type.
+// If future releases of Go add new predeclared integer types,
+// this constraint will be modified to include them.
+type Integer interface {
+ Signed | Unsigned
+}
+
+// Float is a constraint that permits any floating-point type.
+// If future releases of Go add new predeclared floating-point types,
+// this constraint will be modified to include them.
+type Float interface {
+ ~float32 | ~float64
+}
+
+// Complex is a constraint that permits any complex numeric type.
+// If future releases of Go add new predeclared complex numeric types,
+// this constraint will be modified to include them.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// Ordered is a constraint that permits any ordered type: any type
+// that supports the operators < <= >= >.
+// If future releases of Go add new ordered types,
+// this constraint will be modified to include them.
+type Ordered interface {
+ Integer | Float | ~string
+}
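
A minimal usage sketch of these constraints (editorial illustration, not part of the vendored file); the helper name clamp is hypothetical, and the import path is the upstream golang.org/x/exp/constraints:

package main

import (
    "fmt"

    "golang.org/x/exp/constraints"
)

// clamp limits v to the range [lo, hi]; it compiles for any Ordered type.
func clamp[T constraints.Ordered](v, lo, hi T) T {
    if v < lo {
        return lo
    }
    if v > hi {
        return hi
    }
    return v
}

func main() {
    fmt.Println(clamp(12, 0, 10))     // 10
    fmt.Println(clamp("m", "a", "f")) // f
}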
diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go
new file mode 100644
index 000000000..fbf1934a0
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/cmp.go
@@ -0,0 +1,44 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// min is a version of the predeclared function from the Go 1.21 release.
+func min[T constraints.Ordered](a, b T) T {
+ if a < b || isNaN(a) {
+ return a
+ }
+ return b
+}
+
+// max is a version of the predeclared function from the Go 1.21 release.
+func max[T constraints.Ordered](a, b T) T {
+ if a > b || isNaN(a) {
+ return a
+ }
+ return b
+}
+
+// cmpLess is a copy of cmp.Less from the Go 1.21 release.
+func cmpLess[T constraints.Ordered](x, y T) bool {
+ return (isNaN(x) && !isNaN(y)) || x < y
+}
+
+// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
+func cmpCompare[T constraints.Ordered](x, y T) int {
+ xNaN := isNaN(x)
+ yNaN := isNaN(y)
+ if xNaN && yNaN {
+ return 0
+ }
+ if xNaN || x < y {
+ return -1
+ }
+ if yNaN || x > y {
+ return +1
+ }
+ return 0
+}
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
new file mode 100644
index 000000000..46ceac343
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -0,0 +1,515 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices defines various functions useful with slices of any type.
+package slices
+
+import (
+ "unsafe"
+
+ "golang.org/x/exp/constraints"
+)
+
+// Equal reports whether two slices are equal: the same length and all
+// elements equal. If the lengths are different, Equal returns false.
+// Otherwise, the elements are compared in increasing index order, and the
+// comparison stops at the first unequal pair.
+// Floating point NaNs are not considered equal.
+func Equal[S ~[]E, E comparable](s1, s2 S) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ if s1[i] != s2[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// EqualFunc reports whether two slices are equal using an equality
+// function on each pair of elements. If the lengths are different,
+// EqualFunc returns false. Otherwise, the elements are compared in
+// increasing index order, and the comparison stops at the first index
+// for which eq returns false.
+func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i, v1 := range s1 {
+ v2 := s2[i]
+ if !eq(v1, v2) {
+ return false
+ }
+ }
+ return true
+}
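
A brief usage sketch for Equal and EqualFunc (illustrative only, not part of the vendored sources), assuming the package is imported by its upstream path golang.org/x/exp/slices:

package main

import (
    "fmt"
    "strings"

    "golang.org/x/exp/slices"
)

func main() {
    fmt.Println(slices.Equal([]int{1, 2, 3}, []int{1, 2, 3})) // true

    a := []string{"Go", "Slices"}
    b := []string{"go", "SLICES"}
    // EqualFunc compares element pairs with a caller-supplied predicate.
    fmt.Println(slices.EqualFunc(a, b, func(x, y string) bool {
        return strings.EqualFold(x, y)
    })) // true
}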
+
+// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
+// of elements. The elements are compared sequentially, starting at index 0,
+// until one element is not equal to the other.
+// The result of comparing the first non-matching elements is returned.
+// If both slices are equal until one of them ends, the shorter slice is
+// considered less than the longer one.
+// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
+func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
+ for i, v1 := range s1 {
+ if i >= len(s2) {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmpCompare(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < len(s2) {
+ return -1
+ }
+ return 0
+}
+
+// CompareFunc is like [Compare] but uses a custom comparison function on each
+// pair of elements.
+// The result is the first non-zero result of cmp; if cmp always
+// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
+// and +1 if len(s1) > len(s2).
+func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
+ for i, v1 := range s1 {
+ if i >= len(s2) {
+ return +1
+ }
+ v2 := s2[i]
+ if c := cmp(v1, v2); c != 0 {
+ return c
+ }
+ }
+ if len(s1) < len(s2) {
+ return -1
+ }
+ return 0
+}
+
+// Index returns the index of the first occurrence of v in s,
+// or -1 if not present.
+func Index[S ~[]E, E comparable](s S, v E) int {
+ for i := range s {
+ if v == s[i] {
+ return i
+ }
+ }
+ return -1
+}
+
+// IndexFunc returns the first index i satisfying f(s[i]),
+// or -1 if none do.
+func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
+ for i := range s {
+ if f(s[i]) {
+ return i
+ }
+ }
+ return -1
+}
+
+// Contains reports whether v is present in s.
+func Contains[S ~[]E, E comparable](s S, v E) bool {
+ return Index(s, v) >= 0
+}
+
+// ContainsFunc reports whether at least one
+// element e of s satisfies f(e).
+func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
+ return IndexFunc(s, f) >= 0
+}
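
A short, hedged sketch of the lookup helpers above (not part of the vendored file): Index and Contains work on comparable elements, IndexFunc and ContainsFunc take a predicate.

package main

import (
    "fmt"
    "strings"

    "golang.org/x/exp/slices"
)

func main() {
    s := []string{"alpha", "beta", "gamma"}
    fmt.Println(slices.Index(s, "beta"))    // 1
    fmt.Println(slices.Contains(s, "zeta")) // false
    fmt.Println(slices.IndexFunc(s, func(v string) bool {
        return strings.HasPrefix(v, "g")
    })) // 2
}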
+
+// Insert inserts the values v... into s at index i,
+// returning the modified slice.
+// The elements at s[i:] are shifted up to make room.
+// In the returned slice r, r[i] == v[0],
+// and r[i+len(v)] == value originally at r[i].
+// Insert panics if i is out of range.
+// This function is O(len(s) + len(v)).
+func Insert[S ~[]E, E any](s S, i int, v ...E) S {
+ m := len(v)
+ if m == 0 {
+ return s
+ }
+ n := len(s)
+ if i == n {
+ return append(s, v...)
+ }
+ if n+m > cap(s) {
+ // Use append rather than make so that we bump the size of
+ // the slice up to the next storage class.
+ // This is what Grow does but we don't call Grow because
+ // that might copy the values twice.
+ s2 := append(s[:i], make(S, n+m-i)...)
+ copy(s2[i:], v)
+ copy(s2[i+m:], s[i:])
+ return s2
+ }
+ s = s[:n+m]
+
+ // before:
+ // s: aaaaaaaabbbbccccccccdddd
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // after:
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ //
+ // a are the values that don't move in s.
+ // v are the values copied in from v.
+ // b and c are the values from s that are shifted up in index.
+ // d are the values that get overwritten, never to be seen again.
+
+ if !overlaps(v, s[i+m:]) {
+ // Easy case - v does not overlap either the c or d regions.
+ // (It might be in some of a or b, or elsewhere entirely.)
+ // The data we copy up doesn't write to v at all, so just do it.
+
+ copy(s[i+m:], s[i:])
+
+ // Now we have
+ // s: aaaaaaaabbbbbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // Note the b values are duplicated.
+
+ copy(s[i:], v)
+
+ // Now we have
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // That's the result we want.
+ return s
+ }
+
+ // The hard case - v overlaps c or d. We can't just shift up
+ // the data because we'd move or clobber the values we're trying
+ // to insert.
+ // So instead, write v on top of d, then rotate.
+ copy(s[n:], v)
+
+ // Now we have
+ // s: aaaaaaaabbbbccccccccvvvv
+ // ^ ^ ^ ^
+ // i i+m n n+m
+
+ rotateRight(s[i:], m)
+
+ // Now we have
+ // s: aaaaaaaavvvvbbbbcccccccc
+ // ^ ^ ^ ^
+ // i i+m n n+m
+ // That's the result we want.
+ return s
+}
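
The diagrams above describe the in-place shifting and rotation; the observable behavior is simpler. A usage sketch (illustrative, not part of the vendored sources):

package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    s := []int{1, 2, 6, 7}
    // Insert places the new values at index 2 and shifts s[2:] up.
    s = slices.Insert(s, 2, 3, 4, 5)
    fmt.Println(s) // [1 2 3 4 5 6 7]
}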
+
+// clearSlice sets all elements up to the length of s to the zero value of E.
+// We may use the builtin clear func instead, and remove clearSlice, when upgrading
+// to Go 1.21+.
+func clearSlice[S ~[]E, E any](s S) {
+ var zero E
+ for i := range s {
+ s[i] = zero
+ }
+}
+
+// Delete removes the elements s[i:j] from s, returning the modified slice.
+// Delete panics if j > len(s) or s[i:j] is not a valid slice of s.
+// Delete is O(len(s)-i), so if many items must be deleted, it is better to
+// make a single call deleting them all together than to delete one at a time.
+// Delete zeroes the elements s[len(s)-(j-i):len(s)].
+func Delete[S ~[]E, E any](s S, i, j int) S {
+ _ = s[i:j:len(s)] // bounds check
+
+ if i == j {
+ return s
+ }
+
+ oldlen := len(s)
+ s = append(s[:i], s[j:]...)
+ clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC
+ return s
+}
+
+// DeleteFunc removes any elements from s for which del returns true,
+// returning the modified slice.
+// DeleteFunc zeroes the elements between the new length and the original length.
+func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
+ i := IndexFunc(s, del)
+ if i == -1 {
+ return s
+ }
+ // Don't start copying elements until we find one to delete.
+ for j := i + 1; j < len(s); j++ {
+ if v := s[j]; !del(v) {
+ s[i] = v
+ i++
+ }
+ }
+ clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
+ return s[:i]
+}
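
A minimal sketch of Delete and DeleteFunc (editorial illustration, not part of the vendored file); both return the shortened slice, so the result must be reassigned:

package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    s := []int{10, 20, 30, 40, 50}
    s = slices.Delete(s, 1, 3) // removes s[1:3]
    fmt.Println(s)             // [10 40 50]

    s = slices.DeleteFunc(s, func(v int) bool { return v > 20 })
    fmt.Println(s) // [10]
}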
+
+// Replace replaces the elements s[i:j] by the given v, and returns the
+// modified slice. Replace panics if s[i:j] is not a valid slice of s.
+// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length.
+func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
+ _ = s[i:j] // verify that i:j is a valid subslice
+
+ if i == j {
+ return Insert(s, i, v...)
+ }
+ if j == len(s) {
+ return append(s[:i], v...)
+ }
+
+ tot := len(s[:i]) + len(v) + len(s[j:])
+ if tot > cap(s) {
+ // Too big to fit, allocate and copy over.
+ s2 := append(s[:i], make(S, tot-i)...) // See Insert
+ copy(s2[i:], v)
+ copy(s2[i+len(v):], s[j:])
+ return s2
+ }
+
+ r := s[:tot]
+
+ if i+len(v) <= j {
+ // Easy, as v fits in the deleted portion.
+ copy(r[i:], v)
+ if i+len(v) != j {
+ copy(r[i+len(v):], s[j:])
+ }
+ clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC
+ return r
+ }
+
+ // We are expanding (v is bigger than j-i).
+ // The situation is something like this:
+ // (example has i=4,j=8,len(s)=16,len(v)=6)
+ // s: aaaaxxxxbbbbbbbbyy
+ // ^ ^ ^ ^
+ // i j len(s) tot
+ // a: prefix of s
+ // x: deleted range
+ // b: more of s
+ // y: area to expand into
+
+ if !overlaps(r[i+len(v):], v) {
+ // Easy, as v is not clobbered by the first copy.
+ copy(r[i+len(v):], s[j:])
+ copy(r[i:], v)
+ return r
+ }
+
+ // This is a situation where we don't have a single place to which
+ // we can copy v. Parts of it need to go to two different places.
+ // We want to copy the prefix of v into y and the suffix into x, then
+ // rotate |y| spots to the right.
+ //
+ // v[2:] v[:2]
+ // | |
+ // s: aaaavvvvbbbbbbbbvv
+ // ^ ^ ^ ^
+ // i j len(s) tot
+ //
+ // If either of those two destinations don't alias v, then we're good.
+ y := len(v) - (j - i) // length of y portion
+
+ if !overlaps(r[i:j], v) {
+ copy(r[i:j], v[y:])
+ copy(r[len(s):], v[:y])
+ rotateRight(r[i:], y)
+ return r
+ }
+ if !overlaps(r[len(s):], v) {
+ copy(r[len(s):], v[:y])
+ copy(r[i:j], v[y:])
+ rotateRight(r[i:], y)
+ return r
+ }
+
+ // Now we know that v overlaps both x and y.
+ // That means that the entirety of b is *inside* v.
+ // So we don't need to preserve b at all; instead we
+ // can copy v first, then copy the b part of v out of
+ // v to the right destination.
+ k := startIdx(v, s[j:])
+ copy(r[i:], v)
+ copy(r[i+len(v):], r[i+k:])
+ return r
+}
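
A brief usage sketch for Replace (illustrative only): the elements s[i:j] are spliced out and the given values take their place.

package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    s := []string{"a", "x", "x", "d"}
    s = slices.Replace(s, 1, 3, "b", "c")
    fmt.Println(s) // [a b c d]
}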
+
+// Clone returns a copy of the slice.
+// The elements are copied using assignment, so this is a shallow clone.
+func Clone[S ~[]E, E any](s S) S {
+ // Preserve nil in case it matters.
+ if s == nil {
+ return nil
+ }
+ return append(S([]E{}), s...)
+}
+
+// Compact replaces consecutive runs of equal elements with a single copy.
+// This is like the uniq command found on Unix.
+// Compact modifies the contents of the slice s and returns the modified slice,
+// which may have a smaller length.
+// Compact zeroes the elements between the new length and the original length.
+func Compact[S ~[]E, E comparable](s S) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ for k := 1; k < len(s); k++ {
+ if s[k] != s[k-1] {
+ if i != k {
+ s[i] = s[k]
+ }
+ i++
+ }
+ }
+ clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
+ return s[:i]
+}
+
+// CompactFunc is like [Compact] but uses an equality function to compare elements.
+// For runs of elements that compare equal, CompactFunc keeps the first one.
+// CompactFunc zeroes the elements between the new length and the original length.
+func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
+ if len(s) < 2 {
+ return s
+ }
+ i := 1
+ for k := 1; k < len(s); k++ {
+ if !eq(s[k], s[k-1]) {
+ if i != k {
+ s[i] = s[k]
+ }
+ i++
+ }
+ }
+ clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
+ return s[:i]
+}
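
A short sketch of Compact (not part of the vendored sources): like uniq, it only collapses consecutive runs, so sort first to deduplicate an entire slice.

package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    s := []int{1, 1, 2, 2, 2, 3, 1}
    fmt.Println(slices.Compact(s)) // [1 2 3 1]
}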
+
+// Grow increases the slice's capacity, if necessary, to guarantee space for
+// another n elements. After Grow(n), at least n elements can be appended
+// to the slice without another allocation. If n is negative or too large to
+// allocate the memory, Grow panics.
+func Grow[S ~[]E, E any](s S, n int) S {
+ if n < 0 {
+ panic("cannot be negative")
+ }
+ if n -= cap(s) - len(s); n > 0 {
+ // TODO(https://go.dev/issue/53888): use []E instead of S
+ // to work around a compiler bug where the runtime.growslice optimization
+ // does not take effect. Revert when the compiler is fixed.
+ s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
+ }
+ return s
+}
+
+// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
+func Clip[S ~[]E, E any](s S) S {
+ return s[:len(s):len(s)]
+}
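
A hedged usage sketch for Grow and Clip (editorial illustration): Grow preallocates capacity so later appends do not reallocate, and Clip drops the spare capacity again.

package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    s := make([]int, 0)
    s = slices.Grow(s, 1000)
    fmt.Println(len(s), cap(s) >= 1000) // 0 true

    s = append(s, 1, 2, 3)
    s = slices.Clip(s)
    fmt.Println(len(s), cap(s)) // 3 3
}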
+
+// Rotation algorithm explanation:
+//
+// rotate left by 2
+// start with
+// 0123456789
+// split up like this
+// 01 234567 89
+// swap first 2 and last 2
+// 89 234567 01
+// join first parts
+// 89234567 01
+// recursively rotate first left part by 2
+// 23456789 01
+// join at the end
+// 2345678901
+//
+// rotate left by 8
+// start with
+// 0123456789
+// split up like this
+// 01 234567 89
+// swap first 2 and last 2
+// 89 234567 01
+// join last parts
+// 89 23456701
+// recursively rotate second part left by 6
+// 89 01234567
+// join at the end
+// 8901234567
+
+// TODO: There are other rotate algorithms.
+// This algorithm has the desirable property that it moves each element exactly twice.
+// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
+// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
+
+// rotateLeft rotates s left by r spaces.
+// s_final[i] = s_orig[i+r], wrapping around.
+func rotateLeft[E any](s []E, r int) {
+ for r != 0 && r != len(s) {
+ if r*2 <= len(s) {
+ swap(s[:r], s[len(s)-r:])
+ s = s[:len(s)-r]
+ } else {
+ swap(s[:len(s)-r], s[r:])
+ s, r = s[len(s)-r:], r*2-len(s)
+ }
+ }
+}
+func rotateRight[E any](s []E, r int) {
+ rotateLeft(s, len(s)-r)
+}
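
The rotation contract described in the comment block above (s_final[i] = s_orig[i+r], wrapping around) can be sanity-checked with a copying variant. This standalone sketch is hypothetical and deliberately simpler than the in-place, two-writes-per-element algorithm vendored here:

package main

import "fmt"

// rotateLeftCopy returns a rotated copy: out[i] = s[(i+r) % len(s)].
func rotateLeftCopy[E any](s []E, r int) []E {
    out := make([]E, len(s))
    for i := range s {
        out[i] = s[(i+r)%len(s)]
    }
    return out
}

func main() {
    fmt.Println(rotateLeftCopy([]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, 2))
    // [2 3 4 5 6 7 8 9 0 1]
}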
+
+// swap swaps the contents of x and y. x and y must be equal length and disjoint.
+func swap[E any](x, y []E) {
+ for i := 0; i < len(x); i++ {
+ x[i], y[i] = y[i], x[i]
+ }
+}
+
+// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
+func overlaps[E any](a, b []E) bool {
+ if len(a) == 0 || len(b) == 0 {
+ return false
+ }
+ elemSize := unsafe.Sizeof(a[0])
+ if elemSize == 0 {
+ return false
+ }
+ // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
+ // Also see crypto/internal/alias/alias.go:AnyOverlap
+ return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
+ uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
+}
+
+// startIdx returns the index in haystack where the needle starts.
+// prerequisite: the needle must be aliased entirely inside the haystack.
+func startIdx[E any](haystack, needle []E) int {
+ p := &needle[0]
+ for i := range haystack {
+ if p == &haystack[i] {
+ return i
+ }
+ }
+ // TODO: what if the overlap is by a non-integral number of Es?
+ panic("needle not found")
+}
+
+// Reverse reverses the elements of the slice in place.
+func Reverse[S ~[]E, E any](s S) {
+ for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
+ s[i], s[j] = s[j], s[i]
+ }
+}
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
new file mode 100644
index 000000000..f58bbc7ba
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -0,0 +1,197 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
+
+package slices
+
+import (
+ "math/bits"
+
+ "golang.org/x/exp/constraints"
+)
+
+// Sort sorts a slice of any ordered type in ascending order.
+// When sorting floating-point numbers, NaNs are ordered before other values.
+func Sort[S ~[]E, E constraints.Ordered](x S) {
+ n := len(x)
+ pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+}
+
+// SortFunc sorts the slice x in ascending order as determined by the cmp
+// function. This sort is not guaranteed to be stable.
+// cmp(a, b) should return a negative number when a < b, a positive number when
+// a > b and zero when a == b or when a is not comparable to b in the sense
+// of the formal definition of Strict Weak Ordering.
+//
+// SortFunc requires that cmp is a strict weak ordering.
+// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
+// To indicate 'uncomparable', return 0 from the function.
+func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
+ n := len(x)
+ pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
+}
+
+// SortStableFunc sorts the slice x while keeping the original order of equal
+// elements, using cmp to compare elements in the same way as [SortFunc].
+func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
+ stableCmpFunc(x, len(x), cmp)
+}
+
+// IsSorted reports whether x is sorted in ascending order.
+func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if cmpLess(x[i], x[i-1]) {
+ return false
+ }
+ }
+ return true
+}
+
+// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
+// comparison function as defined by [SortFunc].
+func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
+ for i := len(x) - 1; i > 0; i-- {
+ if cmp(x[i], x[i-1]) < 0 {
+ return false
+ }
+ }
+ return true
+}
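
A brief usage sketch covering Sort, SortFunc, and IsSorted (illustrative only, not part of the vendored file); the user type is hypothetical:

package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

type user struct {
    name string
    age  int
}

func main() {
    xs := []int{3, 1, 2}
    slices.Sort(xs)
    fmt.Println(xs, slices.IsSorted(xs)) // [1 2 3] true

    us := []user{{"bo", 42}, {"al", 17}}
    // cmp returns <0, 0, or >0, as required by SortFunc.
    slices.SortFunc(us, func(a, b user) int { return a.age - b.age })
    fmt.Println(us) // [{al 17} {bo 42}]
}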
+
+// Min returns the minimal value in x. It panics if x is empty.
+// For floating-point numbers, Min propagates NaNs (any NaN value in x
+// forces the output to be NaN).
+func Min[S ~[]E, E constraints.Ordered](x S) E {
+ if len(x) < 1 {
+ panic("slices.Min: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ m = min(m, x[i])
+ }
+ return m
+}
+
+// MinFunc returns the minimal value in x, using cmp to compare elements.
+// It panics if x is empty. If there is more than one minimal element
+// according to the cmp function, MinFunc returns the first one.
+func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
+ if len(x) < 1 {
+ panic("slices.MinFunc: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ if cmp(x[i], m) < 0 {
+ m = x[i]
+ }
+ }
+ return m
+}
+
+// Max returns the maximal value in x. It panics if x is empty.
+// For floating-point E, Max propagates NaNs (any NaN value in x
+// forces the output to be NaN).
+func Max[S ~[]E, E constraints.Ordered](x S) E {
+ if len(x) < 1 {
+ panic("slices.Max: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ m = max(m, x[i])
+ }
+ return m
+}
+
+// MaxFunc returns the maximal value in x, using cmp to compare elements.
+// It panics if x is empty. If there is more than one maximal element
+// according to the cmp function, MaxFunc returns the first one.
+func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
+ if len(x) < 1 {
+ panic("slices.MaxFunc: empty list")
+ }
+ m := x[0]
+ for i := 1; i < len(x); i++ {
+ if cmp(x[i], m) > 0 {
+ m = x[i]
+ }
+ }
+ return m
+}
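
A short sketch of Min and Max (not part of the vendored sources): both panic on an empty slice, and for floating-point element types any NaN in the input yields NaN.

package main

import (
    "fmt"
    "math"

    "golang.org/x/exp/slices"
)

func main() {
    fmt.Println(slices.Min([]int{3, 1, 2}))              // 1
    fmt.Println(slices.Max([]float64{1, math.NaN(), 2})) // NaN
}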
+
+// BinarySearch searches for target in a sorted slice and returns the position
+// where target is found, or the position where target would appear in the
+// sort order; it also returns a bool saying whether the target is really found
+// in the slice. The slice must be sorted in increasing order.
+func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
+ // Inlining is faster than calling BinarySearchFunc with a lambda.
+ n := len(x)
+ // Define x[-1] < target and x[n] >= target.
+ // Invariant: x[i-1] < target, x[j] >= target.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmpLess(x[h], target) {
+ i = h + 1 // preserves x[i-1] < target
+ } else {
+ j = h // preserves x[j] >= target
+ }
+ }
+ // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
+ return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
+}
+
+// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
+// function. The slice must be sorted in increasing order, where "increasing"
+// is defined by cmp. cmp should return 0 if the slice element matches
+// the target, a negative number if the slice element precedes the target,
+// or a positive number if the slice element follows the target.
+// cmp must implement the same ordering as the slice, such that if
+// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
+func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
+ n := len(x)
+ // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
+ // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if cmp(x[h], target) < 0 {
+ i = h + 1 // preserves cmp(x[i - 1], target) < 0
+ } else {
+ j = h // preserves cmp(x[j], target) >= 0
+ }
+ }
+ // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
+ return i, i < n && cmp(x[i], target) == 0
+}
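
A usage sketch for BinarySearch (illustrative only): the slice must already be sorted, and the returned index is the insertion position whether or not the target was found.

package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    xs := []int{1, 3, 5, 7}

    i, found := slices.BinarySearch(xs, 5)
    fmt.Println(i, found) // 2 true

    i, found = slices.BinarySearch(xs, 4)
    fmt.Println(i, found) // 2 false (4 would be inserted at index 2)
}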
+
+type sortedHint int // hint for pdqsort when choosing the pivot
+
+const (
+ unknownHint sortedHint = iota
+ increasingHint
+ decreasingHint
+)
+
+// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
+type xorshift uint64
+
+func (r *xorshift) Next() uint64 {
+ *r ^= *r << 13
+ *r ^= *r >> 17
+ *r ^= *r << 5
+ return uint64(*r)
+}
+
+func nextPowerOfTwo(length int) uint {
+ return 1 << bits.Len(uint(length))
+}
+
+// isNaN reports whether x is a NaN without requiring the math package.
+// This will always return false if T is not floating-point.
+func isNaN[T constraints.Ordered](x T) bool {
+ return x != x
+}
diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
new file mode 100644
index 000000000..06f2c7a24
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
@@ -0,0 +1,479 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+// insertionSortCmpFunc sorts data[a:b] using insertion sort.
+func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownCmpFunc implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
+ child++
+ }
+ if !(cmp(data[first+root], data[first+child]) < 0) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownCmpFunc(data, i, hi, first, cmp)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownCmpFunc(data, lo, i, first, cmp)
+ }
+}
+
+// pdqsortCmpFunc sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortCmpFunc(data, a, b, cmp)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortCmpFunc(data, a, b, cmp)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsCmpFunc(data, a, b, cmp)
+ limit--
+ }
+
+ pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
+ if hint == decreasingHint {
+ reverseRangeCmpFunc(data, a, b, cmp)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortCmpFunc(data, a, b, cmp) {
+ return
+ }
+ }
+
+ // The slice probably contains many duplicate elements; partition it into
+ // elements equal to and elements greater than the pivot.
+ if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
+ mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortCmpFunc(data, a, mid, limit, cmp)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortCmpFunc(data, mid+1, b, limit, cmp)
+ b = mid
+ }
+ }
+}
+
+// partitionCmpFunc does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && (cmp(data[i], data[a]) < 0) {
+ i++
+ }
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && (cmp(data[i], data[a]) < 0) {
+ i++
+ }
+ for i <= j && !(cmp(data[j], data[a]) < 0) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !(cmp(data[a], data[i]) < 0) {
+ i++
+ }
+ for i <= j && (cmp(data[a], data[j]) < 0) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortCmpFunc partially sorts a slice and returns true if the slice is sorted at the end.
+func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !(cmp(data[i], data[i-1]) < 0) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !(cmp(data[j], data[j-1]) < 0) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !(cmp(data[j], data[j-1]) < 0) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotCmpFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
+ j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
+ k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
+ }
+ // Find the median among i, j, k and store it into j.
+ j = medianCmpFunc(data, i, j, k, &swaps, cmp)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
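
The size thresholds above are easiest to see on a concrete input. The following standalone sketch (illustrative only, not part of the vendored file) shows plain median-of-three selection, the strategy used for the mid-sized [8,shortestNinther) range:

package main

import "fmt"

// medianOfThree returns the index (among a, b, c) whose value is the median.
// It mirrors the order2/median comparison network above, for a concrete int slice.
func medianOfThree(data []int, a, b, c int) int {
	if data[b] < data[a] {
		a, b = b, a
	}
	if data[c] < data[b] {
		b, c = c, b
	}
	if data[b] < data[a] {
		a, b = b, a
	}
	return b
}

func main() {
	data := []int{9, 2, 7, 4, 1, 8, 3, 6, 5}
	// Sample at the 1/4, 2/4, and 3/4 points, as choosePivot does for l >= 8.
	l := len(data)
	i, j, k := l/4, l/2, 3*l/4
	pivot := medianOfThree(data, i, j, k)
	fmt.Printf("candidates data[%d]=%d data[%d]=%d data[%d]=%d -> pivot index %d (value %d)\n",
		i, data[i], j, data[j], k, data[k], pivot, data[pivot])
}
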
+
+// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
+ if cmp(data[b], data[a]) < 0 {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ b, c = order2CmpFunc(data, b, c, swaps, cmp)
+ a, b = order2CmpFunc(data, a, b, swaps, cmp)
+ return b
+}
+
+// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
+ return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
+}
+
+func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortCmpFunc(data, a, b, cmp)
+ a = b
+ b += blockSize
+ }
+ insertionSortCmpFunc(data, a, n, cmp)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeCmpFunc(data, a, a+blockSize, b, cmp)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeCmpFunc(data, a, m, n, cmp)
+ }
+ blockSize *= 2
+ }
+}
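
stableCmpFunc is what backs the package's stable sort: it insertion-sorts fixed-size blocks and then merges adjacent blocks with symMerge, doubling the block size on each pass. A hedged usage sketch, assuming the exported slices.SortStableFunc wrapper in this vendored revision:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

type entry struct {
	key string
	seq int // original position, to show stability
}

func main() {
	entries := []entry{{"b", 0}, {"a", 1}, {"b", 2}, {"a", 3}}

	// SortStableFunc ends up in stableCmpFunc; equal keys keep their input order.
	slices.SortStableFunc(entries, func(x, y entry) int {
		return strings.Compare(x.key, y.key)
	})

	fmt.Println(entries) // [{a 1} {a 3} {b 0} {b 2}]
}
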
+
+// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if cmp(data[h], data[a]) < 0 {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !(cmp(data[m], data[h]) < 0) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !(cmp(data[p-c], data[c]) < 0) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateCmpFunc(data, start, m, end, cmp)
+ }
+ if a < start && start < mid {
+ symMergeCmpFunc(data, a, start, mid, cmp)
+ }
+ if mid < end && end < b {
+ symMergeCmpFunc(data, mid, end, b, cmp)
+ }
+}
+
+// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeCmpFunc(data, m-i, m, j, cmp)
+ i -= j
+ } else {
+ swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeCmpFunc(data, m-i, m, i, cmp)
+}
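
To make the 'x u v y' picture concrete, here is a small standalone demonstration of the same block-swap rotation on an int slice (illustrative only; the vendored helpers above are unexported):

package main

import "fmt"

func swapRange(data []int, a, b, n int) {
	for i := 0; i < n; i++ {
		data[a+i], data[b+i] = data[b+i], data[a+i]
	}
}

// rotate exchanges the consecutive blocks data[a:m] and data[m:b] in place,
// using the same swap-the-shorter-block loop as rotateCmpFunc/rotateOrdered.
func rotate(data []int, a, m, b int) {
	i, j := m-a, b-m
	for i != j {
		if i > j {
			swapRange(data, m-i, m, j)
			i -= j
		} else {
			swapRange(data, m-i, m+j-i, i)
			j -= i
		}
	}
	swapRange(data, m-i, m, i)
}

func main() {
	// x | u u | v v v | y
	data := []int{0, 1, 1, 2, 2, 2, 9}
	rotate(data, 1, 3, 6)
	fmt.Println(data) // [0 2 2 2 1 1 9]
}
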
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
new file mode 100644
index 000000000..99b47c398
--- /dev/null
+++ b/vendor/golang.org/x/exp/slices/zsortordered.go
@@ -0,0 +1,481 @@
+// Code generated by gen_sort_variants.go; DO NOT EDIT.
+
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package slices
+
+import "golang.org/x/exp/constraints"
+
+// insertionSortOrdered sorts data[a:b] using insertion sort.
+func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// siftDownOrdered implements the heap property on data[lo:hi].
+// first is an offset into the array where the root of the heap lies.
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
+ child++
+ }
+ if !cmpLess(data[first+root], data[first+child]) {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+
+func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDownOrdered(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDownOrdered(data, lo, i, first)
+ }
+}
+
+// pdqsortOrdered sorts data[a:b].
+// The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort.
+// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
+// C++ implementation: https://github.com/orlp/pdqsort
+// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
+// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
+func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
+ const maxInsertion = 12
+
+ var (
+ wasBalanced = true // whether the last partitioning was reasonably balanced
+ wasPartitioned = true // whether the slice was already partitioned
+ )
+
+ for {
+ length := b - a
+
+ if length <= maxInsertion {
+ insertionSortOrdered(data, a, b)
+ return
+ }
+
+ // Fall back to heapsort if too many bad choices were made.
+ if limit == 0 {
+ heapSortOrdered(data, a, b)
+ return
+ }
+
+ // If the last partitioning was imbalanced, we need to break patterns.
+ if !wasBalanced {
+ breakPatternsOrdered(data, a, b)
+ limit--
+ }
+
+ pivot, hint := choosePivotOrdered(data, a, b)
+ if hint == decreasingHint {
+ reverseRangeOrdered(data, a, b)
+ // The chosen pivot was pivot-a elements after the start of the array.
+ // After reversing it is pivot-a elements before the end of the array.
+ // The idea came from Rust's implementation.
+ pivot = (b - 1) - (pivot - a)
+ hint = increasingHint
+ }
+
+ // The slice is likely already sorted.
+ if wasBalanced && wasPartitioned && hint == increasingHint {
+ if partialInsertionSortOrdered(data, a, b) {
+ return
+ }
+ }
+
+ // The slice probably contains many duplicate elements; partition it into
+ // elements equal to the pivot and elements greater than the pivot.
+ if a > 0 && !cmpLess(data[a-1], data[pivot]) {
+ mid := partitionEqualOrdered(data, a, b, pivot)
+ a = mid
+ continue
+ }
+
+ mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
+ wasPartitioned = alreadyPartitioned
+
+ leftLen, rightLen := mid-a, b-mid
+ balanceThreshold := length / 8
+ if leftLen < rightLen {
+ wasBalanced = leftLen >= balanceThreshold
+ pdqsortOrdered(data, a, mid, limit)
+ a = mid + 1
+ } else {
+ wasBalanced = rightLen >= balanceThreshold
+ pdqsortOrdered(data, mid+1, b, limit)
+ b = mid
+ }
+ }
+}
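
The Ordered variants are the generated twins of the *CmpFunc functions above, specialized for element types with a natural ordering. A minimal usage sketch, assuming the exported slices.Sort wrapper:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []int{5, 2, 8, 1, 9, 3}

	// Sort dispatches to pdqsortOrdered via the constraints.Ordered code path.
	slices.Sort(xs)
	fmt.Println(xs) // [1 2 3 5 8 9]

	// The same path handles any ordered element type, e.g. strings.
	names := []string{"pear", "apple", "fig"}
	slices.Sort(names)
	fmt.Println(names) // [apple fig pear]
}
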
+
+// partitionOrdered does one quicksort partition.
+// Let p = data[pivot]
+// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for i <= j && cmpLess(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !cmpLess(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ data[j], data[a] = data[a], data[j]
+ return j, true
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+
+ for {
+ for i <= j && cmpLess(data[i], data[a]) {
+ i++
+ }
+ for i <= j && !cmpLess(data[j], data[a]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ data[j], data[a] = data[a], data[j]
+ return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It assumes that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+ data[a], data[pivot] = data[pivot], data[a]
+ i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+ for {
+ for i <= j && !cmpLess(data[a], data[i]) {
+ i++
+ }
+ for i <= j && cmpLess(data[a], data[j]) {
+ j--
+ }
+ if i > j {
+ break
+ }
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+ return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice and returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+ const (
+ maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
+ shortestShifting = 50 // don't shift any elements on short arrays
+ )
+ i := a + 1
+ for j := 0; j < maxSteps; j++ {
+ for i < b && !cmpLess(data[i], data[i-1]) {
+ i++
+ }
+
+ if i == b {
+ return true
+ }
+
+ if b-a < shortestShifting {
+ return false
+ }
+
+ data[i], data[i-1] = data[i-1], data[i]
+
+ // Shift the smaller one to the left.
+ if i-a >= 2 {
+ for j := i - 1; j >= 1; j-- {
+ if !cmpLess(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ // Shift the greater one to the right.
+ if b-i >= 2 {
+ for j := i + 1; j < b; j++ {
+ if !cmpLess(data[j], data[j-1]) {
+ break
+ }
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+ }
+ return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+ length := b - a
+ if length >= 8 {
+ random := xorshift(length)
+ modulus := nextPowerOfTwo(length)
+
+ for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+ other := int(uint(random.Next()) & (modulus - 1))
+ if other >= length {
+ other -= length
+ }
+ data[idx], data[a+other] = data[a+other], data[idx]
+ }
+ }
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+ const (
+ shortestNinther = 50
+ maxSwaps = 4 * 3
+ )
+
+ l := b - a
+
+ var (
+ swaps int
+ i = a + l/4*1
+ j = a + l/4*2
+ k = a + l/4*3
+ )
+
+ if l >= 8 {
+ if l >= shortestNinther {
+ // Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentOrdered(data, i, &swaps)
+ j = medianAdjacentOrdered(data, j, &swaps)
+ k = medianAdjacentOrdered(data, k, &swaps)
+ }
+ // Find the median among i, j, k and store it into j.
+ j = medianOrdered(data, i, j, k, &swaps)
+ }
+
+ switch swaps {
+ case 0:
+ return j, increasingHint
+ case maxSwaps:
+ return j, decreasingHint
+ default:
+ return j, unknownHint
+ }
+}
+
+// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
+func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
+ if cmpLess(data[b], data[a]) {
+ *swaps++
+ return b, a
+ }
+ return a, b
+}
+
+// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
+func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
+ a, b = order2Ordered(data, a, b, swaps)
+ b, c = order2Ordered(data, b, c, swaps)
+ a, b = order2Ordered(data, a, b, swaps)
+ return b
+}
+
+// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
+func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
+ return medianOrdered(data, a-1, a, a+1, swaps)
+}
+
+func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
+ i := a
+ j := b - 1
+ for i < j {
+ data[i], data[j] = data[j], data[i]
+ i++
+ j--
+ }
+}
+
+func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
+ for i := 0; i < n; i++ {
+ data[a+i], data[b+i] = data[b+i], data[a+i]
+ }
+}
+
+func stableOrdered[E constraints.Ordered](data []E, n int) {
+ blockSize := 20 // must be > 0
+ a, b := 0, blockSize
+ for b <= n {
+ insertionSortOrdered(data, a, b)
+ a = b
+ b += blockSize
+ }
+ insertionSortOrdered(data, a, n)
+
+ for blockSize < n {
+ a, b = 0, 2*blockSize
+ for b <= n {
+ symMergeOrdered(data, a, a+blockSize, b)
+ a = b
+ b += 2 * blockSize
+ }
+ if m := a + blockSize; m < n {
+ symMergeOrdered(data, a, m, n)
+ }
+ blockSize *= 2
+ }
+}
+
+// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
+// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
+// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
+// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
+// Computer Science, pages 714-723. Springer, 2004.
+//
+// Let M = m-a and N = b-m. Wolog M < N.
+// The recursion depth is bound by ceil(log(N+M)).
+// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
+// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
+//
+// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
+// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
+// in the paper carries through for Swap operations, especially as the block
+// swapping rotate uses only O(M+N) Swaps.
+//
+// symMerge assumes non-degenerate arguments: a < m && m < b.
+// Having the caller check this condition eliminates many leaf recursion calls,
+// which improves performance.
+func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[a] into data[m:b]
+ // if data[a:m] only contains one element.
+ if m-a == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] >= data[a] for m <= i < b.
+ // Exit the search loop with i == b in case no such index exists.
+ i := m
+ j := b
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if cmpLess(data[h], data[a]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[a] reaches the position before i.
+ for k := a; k < i-1; k++ {
+ data[k], data[k+1] = data[k+1], data[k]
+ }
+ return
+ }
+
+ // Avoid unnecessary recursions of symMerge
+ // by direct insertion of data[m] into data[a:m]
+ // if data[m:b] only contains one element.
+ if b-m == 1 {
+ // Use binary search to find the lowest index i
+ // such that data[i] > data[m] for a <= i < m.
+ // Exit the search loop with i == m in case no such index exists.
+ i := a
+ j := m
+ for i < j {
+ h := int(uint(i+j) >> 1)
+ if !cmpLess(data[m], data[h]) {
+ i = h + 1
+ } else {
+ j = h
+ }
+ }
+ // Swap values until data[m] reaches the position i.
+ for k := m; k > i; k-- {
+ data[k], data[k-1] = data[k-1], data[k]
+ }
+ return
+ }
+
+ mid := int(uint(a+b) >> 1)
+ n := mid + m
+ var start, r int
+ if m > mid {
+ start = n - b
+ r = mid
+ } else {
+ start = a
+ r = m
+ }
+ p := n - 1
+
+ for start < r {
+ c := int(uint(start+r) >> 1)
+ if !cmpLess(data[p-c], data[c]) {
+ start = c + 1
+ } else {
+ r = c
+ }
+ }
+
+ end := n - start
+ if start < m && m < end {
+ rotateOrdered(data, start, m, end)
+ }
+ if a < start && start < mid {
+ symMergeOrdered(data, a, start, mid)
+ }
+ if mid < end && end < b {
+ symMergeOrdered(data, mid, end, b)
+ }
+}
+
+// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
+// Data of the form 'x u v y' is changed to 'x v u y'.
+// rotate performs at most b-a many calls to data.Swap,
+// and it assumes non-degenerate arguments: a < m && m < b.
+func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
+ i := m - a
+ j := b - m
+
+ for i != j {
+ if i > j {
+ swapRangeOrdered(data, m-i, m, j)
+ i -= j
+ } else {
+ swapRangeOrdered(data, m-i, m+j-i, i)
+ j -= i
+ }
+ }
+ // i == j
+ swapRangeOrdered(data, m-i, m, i)
+}
diff --git a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go
index 3a7e5ab17..885c4c593 100644
--- a/vendor/golang.org/x/net/html/doc.go
+++ b/vendor/golang.org/x/net/html/doc.go
@@ -78,16 +78,11 @@ example, to process each anchor node in depth-first order:
if err != nil {
// ...
}
- var f func(*html.Node)
- f = func(n *html.Node) {
+ for n := range doc.Descendants() {
if n.Type == html.ElementNode && n.Data == "a" {
// Do something with n...
}
- for c := n.FirstChild; c != nil; c = c.NextSibling {
- f(c)
- }
}
- f(doc)
The relevant specifications include:
https://html.spec.whatwg.org/multipage/syntax.html and
diff --git a/vendor/golang.org/x/net/html/iter.go b/vendor/golang.org/x/net/html/iter.go
new file mode 100644
index 000000000..54be8fd30
--- /dev/null
+++ b/vendor/golang.org/x/net/html/iter.go
@@ -0,0 +1,56 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.23
+
+package html
+
+import "iter"
+
+// Ancestors returns an iterator over the ancestors of n, starting with n.Parent.
+//
+// Mutating a Node or its parents while iterating may have unexpected results.
+func (n *Node) Ancestors() iter.Seq[*Node] {
+ _ = n.Parent // eager nil check
+
+ return func(yield func(*Node) bool) {
+ for p := n.Parent; p != nil && yield(p); p = p.Parent {
+ }
+ }
+}
+
+// ChildNodes returns an iterator over the immediate children of n,
+// starting with n.FirstChild.
+//
+// Mutating a Node or its children while iterating may have unexpected results.
+func (n *Node) ChildNodes() iter.Seq[*Node] {
+ _ = n.FirstChild // eager nil check
+
+ return func(yield func(*Node) bool) {
+ for c := n.FirstChild; c != nil && yield(c); c = c.NextSibling {
+ }
+ }
+
+}
+
+// Descendants returns an iterator over all nodes recursively beneath
+// n, excluding n itself. Nodes are visited in depth-first preorder.
+//
+// Mutating a Node or its descendants while iterating may have unexpected results.
+func (n *Node) Descendants() iter.Seq[*Node] {
+ _ = n.FirstChild // eager nil check
+
+ return func(yield func(*Node) bool) {
+ n.descendants(yield)
+ }
+}
+
+func (n *Node) descendants(yield func(*Node) bool) bool {
+ for c := range n.ChildNodes() {
+ if !yield(c) || !c.descendants(yield) {
+ return false
+ }
+ }
+ return true
+}
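
A short usage sketch of the new iterators (requires Go 1.23's range-over-func support), mirroring the anchor-collection example from the doc.go change above:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	doc, err := html.Parse(strings.NewReader(`<p><a href="/x">x</a> and <a href="/y">y</a></p>`))
	if err != nil {
		panic(err)
	}

	// Descendants walks the tree in depth-first preorder, replacing the
	// hand-written recursion removed from the doc.go example.
	for n := range doc.Descendants() {
		if n.Type == html.ElementNode && n.Data == "a" {
			for _, attr := range n.Attr {
				if attr.Key == "href" {
					fmt.Println(attr.Val)
				}
			}
		}
	}
}
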
diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go
index 1350eef22..77741a195 100644
--- a/vendor/golang.org/x/net/html/node.go
+++ b/vendor/golang.org/x/net/html/node.go
@@ -38,6 +38,10 @@ var scopeMarker = Node{Type: scopeMarkerNode}
// that it looks like "a cc.idleTimeout
+ return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout
}
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
@@ -1578,6 +1668,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
cs.reqBodyClosed = make(chan struct{})
}
bodyClosed := cs.reqBodyClosed
+ closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
cc.mu.Unlock()
if mustCloseBody {
cs.reqBody.Close()
@@ -1602,16 +1693,40 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
if cs.sentHeaders {
if se, ok := err.(StreamError); ok {
if se.Cause != errFromPeer {
- cc.writeStreamReset(cs.ID, se.Code, err)
+ cc.writeStreamReset(cs.ID, se.Code, false, err)
}
} else {
- cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
+ // We're cancelling an in-flight request.
+ //
+ // This could be due to the server becoming unresponsive.
+ // To avoid sending too many requests on a dead connection,
+ // we let the request continue to consume a concurrency slot
+ // until we can confirm the server is still responding.
+ // We do this by sending a PING frame along with the RST_STREAM
+ // (unless a ping is already in flight).
+ //
+ // For simplicity, we don't bother tracking the PING payload:
+ // We reset cc.pendingResets any time we receive a PING ACK.
+ //
+ // We skip this if the conn is going to be closed on idle,
+ // because it's short lived and will probably be closed before
+ // we get the ping response.
+ ping := false
+ if !closeOnIdle {
+ cc.mu.Lock()
+ if cc.pendingResets == 0 {
+ ping = true
+ }
+ cc.pendingResets++
+ cc.mu.Unlock()
+ }
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err)
}
}
cs.bufPipe.CloseWithError(err) // no-op if already closed
} else {
if cs.sentHeaders && !cs.sentEndStream {
- cc.writeStreamReset(cs.ID, ErrCodeNo, nil)
+ cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil)
}
cs.bufPipe.CloseWithError(errRequestCanceled)
}
@@ -1633,12 +1748,17 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
// Must hold cc.mu.
func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
for {
- cc.lastActive = time.Now()
+ if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 {
+ // This is the very first request sent to this connection.
+ // Return a fatal error which aborts the retry loop.
+ return errClientConnNotEstablished
+ }
+ cc.lastActive = cc.t.now()
if cc.closed || !cc.canTakeNewRequestLocked() {
return errClientConnUnusable
}
cc.lastIdle = time.Time{}
- if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) {
+ if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) {
return nil
}
cc.pendingRequests++
@@ -2180,10 +2300,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
if len(cc.streams) != slen-1 {
panic("forgetting unknown stream id")
}
- cc.lastActive = time.Now()
+ cc.lastActive = cc.t.now()
if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout)
- cc.lastIdle = time.Now()
+ cc.lastIdle = cc.t.now()
}
// Wake up writeRequestBody via clientStream.awaitFlowControl and
// wake up RoundTrip if there is a pending request.
@@ -2243,7 +2363,6 @@ func isEOFOrNetReadError(err error) bool {
func (rl *clientConnReadLoop) cleanup() {
cc := rl.cc
- cc.t.connPool().MarkDead(cc)
defer cc.closeConn()
defer close(cc.readerDone)
@@ -2267,6 +2386,24 @@ func (rl *clientConnReadLoop) cleanup() {
}
cc.closed = true
+ // If the connection has never been used, and has been open for only a short time,
+ // leave it in the connection pool for a little while.
+ //
+ // This avoids a situation where new connections are constantly created,
+ // added to the pool, fail, and are removed from the pool, without any error
+ // being surfaced to the user.
+ const unusedWaitTime = 5 * time.Second
+ idleTime := cc.t.now().Sub(cc.lastActive)
+ if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime {
+ cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
+ cc.t.connPool().MarkDead(cc)
+ })
+ } else {
+ cc.mu.Unlock() // avoid any deadlocks in MarkDead
+ cc.t.connPool().MarkDead(cc)
+ cc.mu.Lock()
+ }
+
for _, cs := range cc.streams {
select {
case <-cs.peerClosed:
@@ -2494,15 +2631,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
if f.StreamEnded() {
return nil, errors.New("1xx informational response with END_STREAM flag")
}
- cs.num1xx++
- const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
- if cs.num1xx > max1xxResponses {
- return nil, errors.New("http2: too many 1xx informational responses")
- }
if fn := cs.get1xxTraceFunc(); fn != nil {
+ // If the 1xx response is being delivered to the user,
+ // then they're responsible for limiting the number
+ // of responses.
if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil {
return nil, err
}
+ } else {
+ // If the user didn't examine the 1xx response, then we
+ // limit the size of all 1xx headers.
+ //
+ // This differs a bit from the HTTP/1 implementation, which
+ // limits the size of all 1xx headers plus the final response.
+ // Use the larger limit of MaxHeaderListSize and
+ // net/http.Transport.MaxResponseHeaderBytes.
+ limit := int64(cs.cc.t.maxHeaderListSize())
+ if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit {
+ limit = t1.MaxResponseHeaderBytes
+ }
+ for _, h := range f.Fields {
+ cs.totalHeaderSize += int64(h.Size())
+ }
+ if cs.totalHeaderSize > limit {
+ if VerboseLogs {
+ log.Printf("http2: 1xx informational responses too large")
+ }
+ return nil, errors.New("header list too large")
+ }
}
if statusCode == 100 {
traceGot100Continue(cs.trace)
@@ -3046,6 +3202,11 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
close(c)
delete(cc.pings, f.Data)
}
+ if cc.pendingResets > 0 {
+ // See clientStream.cleanupWriteRequest.
+ cc.pendingResets = 0
+ cc.cond.Broadcast()
+ }
return nil
}
cc := rl.cc
@@ -3068,13 +3229,20 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
return ConnectionError(ErrCodeProtocol)
}
-func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
+// writeStreamReset sends a RST_STREAM frame.
+// When ping is true, it also sends a PING frame with a random payload.
+func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) {
// TODO: map err to more interesting error codes, once the
// HTTP community comes up with some. But currently for
// RST_STREAM there's no equivalent to GOAWAY frame's debug
// data, and the error codes are all pretty vague ("cancel").
cc.wmu.Lock()
cc.fr.WriteRSTStream(streamID, code)
+ if ping {
+ var payload [8]byte
+ rand.Read(payload[:])
+ cc.fr.WritePing(false, payload)
+ }
cc.bw.Flush()
cc.wmu.Unlock()
}
@@ -3228,7 +3396,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
cc.mu.Lock()
ci.WasIdle = len(cc.streams) == 0 && reused
if ci.WasIdle && !cc.lastActive.IsZero() {
- ci.IdleTime = time.Since(cc.lastActive)
+ ci.IdleTime = cc.t.timeSince(cc.lastActive)
}
cc.mu.Unlock()
diff --git a/vendor/golang.org/x/net/http2/unencrypted.go b/vendor/golang.org/x/net/http2/unencrypted.go
new file mode 100644
index 000000000..b2de21161
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/unencrypted.go
@@ -0,0 +1,32 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "crypto/tls"
+ "errors"
+ "net"
+)
+
+const nextProtoUnencryptedHTTP2 = "unencrypted_http2"
+
+// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn.
+//
+// TLSNextProto functions accept a *tls.Conn.
+//
+// When passing an unencrypted HTTP/2 connection to a TLSNextProto function,
+// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection.
+// To be extra careful about mistakes (accidentally dropping TLS encryption in a place
+// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method
+// that returns the actual connection we want to use.
+func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) {
+ conner, ok := tc.NetConn().(interface {
+ UnencryptedNetConn() net.Conn
+ })
+ if !ok {
+ return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff")
+ }
+ return conner.UnencryptedNetConn(), nil
+}
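
The counterpart of this helper is a wrapper connection that exposes the UnencryptedNetConn method. A minimal sketch of such a wrapper (the type and helper names here are illustrative assumptions, not code from this diff):

package main

import (
	"crypto/tls"
	"fmt"
	"net"
)

// unencryptedConn wraps a plain net.Conn so that unencryptedNetConnFromTLSConn
// can recognize it as a deliberate unencrypted handoff.
type unencryptedConn struct {
	net.Conn
}

func (c unencryptedConn) UnencryptedNetConn() net.Conn {
	return c.Conn
}

func wrapForNextProto(raw net.Conn) *tls.Conn {
	// tls.Client is only used as a carrier here; no handshake is performed.
	// The wrapped conn is recovered via NetConn() and UnencryptedNetConn.
	return tls.Client(unencryptedConn{Conn: raw}, &tls.Config{})
}

func main() {
	c1, c2 := net.Pipe()
	defer c2.Close()

	tc := wrapForNextProto(c1)
	if inner, ok := tc.NetConn().(interface{ UnencryptedNetConn() net.Conn }); ok {
		fmt.Println("recovered unencrypted conn:", inner.UnencryptedNetConn() == c1)
	}
}
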
diff --git a/vendor/golang.org/x/net/websocket/client.go b/vendor/golang.org/x/net/websocket/client.go
new file mode 100644
index 000000000..1e64157f3
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/client.go
@@ -0,0 +1,139 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "context"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "time"
+)
+
+// DialError is an error that occurs while dialling a websocket server.
+type DialError struct {
+ *Config
+ Err error
+}
+
+func (e *DialError) Error() string {
+ return "websocket.Dial " + e.Config.Location.String() + ": " + e.Err.Error()
+}
+
+// NewConfig creates a new WebSocket config for client connection.
+func NewConfig(server, origin string) (config *Config, err error) {
+ config = new(Config)
+ config.Version = ProtocolVersionHybi13
+ config.Location, err = url.ParseRequestURI(server)
+ if err != nil {
+ return
+ }
+ config.Origin, err = url.ParseRequestURI(origin)
+ if err != nil {
+ return
+ }
+ config.Header = http.Header(make(map[string][]string))
+ return
+}
+
+// NewClient creates a new WebSocket client connection over rwc.
+func NewClient(config *Config, rwc io.ReadWriteCloser) (ws *Conn, err error) {
+ br := bufio.NewReader(rwc)
+ bw := bufio.NewWriter(rwc)
+ err = hybiClientHandshake(config, br, bw)
+ if err != nil {
+ return
+ }
+ buf := bufio.NewReadWriter(br, bw)
+ ws = newHybiClientConn(config, buf, rwc)
+ return
+}
+
+// Dial opens a new client connection to a WebSocket.
+func Dial(url_, protocol, origin string) (ws *Conn, err error) {
+ config, err := NewConfig(url_, origin)
+ if err != nil {
+ return nil, err
+ }
+ if protocol != "" {
+ config.Protocol = []string{protocol}
+ }
+ return DialConfig(config)
+}
+
+var portMap = map[string]string{
+ "ws": "80",
+ "wss": "443",
+}
+
+func parseAuthority(location *url.URL) string {
+ if _, ok := portMap[location.Scheme]; ok {
+ if _, _, err := net.SplitHostPort(location.Host); err != nil {
+ return net.JoinHostPort(location.Host, portMap[location.Scheme])
+ }
+ }
+ return location.Host
+}
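
parseAuthority fills in the scheme's default port only when the URL omits one. A small standalone sketch of the same behavior (the vendored function itself is unexported):

package main

import (
	"fmt"
	"net"
	"net/url"
)

var defaultPorts = map[string]string{"ws": "80", "wss": "443"}

// hostPort mirrors parseAuthority: append the scheme's default port
// only when the URL's host has no explicit port.
func hostPort(u *url.URL) string {
	if p, ok := defaultPorts[u.Scheme]; ok {
		if _, _, err := net.SplitHostPort(u.Host); err != nil {
			return net.JoinHostPort(u.Host, p)
		}
	}
	return u.Host
}

func main() {
	for _, raw := range []string{"ws://example.com/chat", "wss://example.com:9443/chat"} {
		u, _ := url.Parse(raw)
		fmt.Println(raw, "->", hostPort(u))
	}
}
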
+
+// DialConfig opens a new client connection to a WebSocket with a config.
+func DialConfig(config *Config) (ws *Conn, err error) {
+ return config.DialContext(context.Background())
+}
+
+// DialContext opens a new client connection to a WebSocket, with context support for timeouts/cancellation.
+func (config *Config) DialContext(ctx context.Context) (*Conn, error) {
+ if config.Location == nil {
+ return nil, &DialError{config, ErrBadWebSocketLocation}
+ }
+ if config.Origin == nil {
+ return nil, &DialError{config, ErrBadWebSocketOrigin}
+ }
+
+ dialer := config.Dialer
+ if dialer == nil {
+ dialer = &net.Dialer{}
+ }
+
+ client, err := dialWithDialer(ctx, dialer, config)
+ if err != nil {
+ return nil, &DialError{config, err}
+ }
+
+ // Cleanup the connection if we fail to create the websocket successfully
+ success := false
+ defer func() {
+ if !success {
+ _ = client.Close()
+ }
+ }()
+
+ var ws *Conn
+ var wsErr error
+ doneConnecting := make(chan struct{})
+ go func() {
+ defer close(doneConnecting)
+ ws, err = NewClient(config, client)
+ if err != nil {
+ wsErr = &DialError{config, err}
+ }
+ }()
+
+ // The websocket.NewClient() function can block indefinitely, so make sure that we
+ // respect the deadlines specified by the context.
+ select {
+ case <-ctx.Done():
+ // Force the pending operations to fail, terminating the pending connection attempt
+ _ = client.SetDeadline(time.Now())
+ <-doneConnecting // Wait for the goroutine that tries to establish the connection to finish
+ return nil, &DialError{config, ctx.Err()}
+ case <-doneConnecting:
+ if wsErr == nil {
+ success = true // Disarm the deferred connection cleanup
+ }
+ return ws, wsErr
+ }
+}
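
A hedged usage sketch of the context-aware dial path added here (the echo endpoint URL and origin are placeholders):

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/net/websocket"
)

func main() {
	config, err := websocket.NewConfig("ws://localhost:8080/echo", "http://localhost/")
	if err != nil {
		panic(err)
	}

	// Bound the whole handshake (TCP dial plus WebSocket upgrade) by the context.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	ws, err := config.DialContext(ctx)
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer ws.Close()

	if _, err := ws.Write([]byte("hello")); err != nil {
		fmt.Println("write failed:", err)
	}
}
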
diff --git a/vendor/golang.org/x/net/websocket/dial.go b/vendor/golang.org/x/net/websocket/dial.go
new file mode 100644
index 000000000..8a2d83c47
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/dial.go
@@ -0,0 +1,29 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+)
+
+func dialWithDialer(ctx context.Context, dialer *net.Dialer, config *Config) (conn net.Conn, err error) {
+ switch config.Location.Scheme {
+ case "ws":
+ conn, err = dialer.DialContext(ctx, "tcp", parseAuthority(config.Location))
+
+ case "wss":
+ tlsDialer := &tls.Dialer{
+ NetDialer: dialer,
+ Config: config.TlsConfig,
+ }
+
+ conn, err = tlsDialer.DialContext(ctx, "tcp", parseAuthority(config.Location))
+ default:
+ err = ErrBadScheme
+ }
+ return
+}
diff --git a/vendor/golang.org/x/net/websocket/hybi.go b/vendor/golang.org/x/net/websocket/hybi.go
new file mode 100644
index 000000000..dda743466
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/hybi.go
@@ -0,0 +1,582 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+// This file implements a protocol of hybi draft.
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/base64"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+)
+
+const (
+ websocketGUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
+
+ closeStatusNormal = 1000
+ closeStatusGoingAway = 1001
+ closeStatusProtocolError = 1002
+ closeStatusUnsupportedData = 1003
+ closeStatusFrameTooLarge = 1004
+ closeStatusNoStatusRcvd = 1005
+ closeStatusAbnormalClosure = 1006
+ closeStatusBadMessageData = 1007
+ closeStatusPolicyViolation = 1008
+ closeStatusTooBigData = 1009
+ closeStatusExtensionMismatch = 1010
+
+ maxControlFramePayloadLength = 125
+)
+
+var (
+ ErrBadMaskingKey = &ProtocolError{"bad masking key"}
+ ErrBadPongMessage = &ProtocolError{"bad pong message"}
+ ErrBadClosingStatus = &ProtocolError{"bad closing status"}
+ ErrUnsupportedExtensions = &ProtocolError{"unsupported extensions"}
+ ErrNotImplemented = &ProtocolError{"not implemented"}
+
+ handshakeHeader = map[string]bool{
+ "Host": true,
+ "Upgrade": true,
+ "Connection": true,
+ "Sec-Websocket-Key": true,
+ "Sec-Websocket-Origin": true,
+ "Sec-Websocket-Version": true,
+ "Sec-Websocket-Protocol": true,
+ "Sec-Websocket-Accept": true,
+ }
+)
+
+// A hybiFrameHeader is a frame header as defined in hybi draft.
+type hybiFrameHeader struct {
+ Fin bool
+ Rsv [3]bool
+ OpCode byte
+ Length int64
+ MaskingKey []byte
+
+ data *bytes.Buffer
+}
+
+// A hybiFrameReader is a reader for a hybi frame.
+type hybiFrameReader struct {
+ reader io.Reader
+
+ header hybiFrameHeader
+ pos int64
+ length int
+}
+
+func (frame *hybiFrameReader) Read(msg []byte) (n int, err error) {
+ n, err = frame.reader.Read(msg)
+ if frame.header.MaskingKey != nil {
+ for i := 0; i < n; i++ {
+ msg[i] = msg[i] ^ frame.header.MaskingKey[frame.pos%4]
+ frame.pos++
+ }
+ }
+ return n, err
+}
+
+func (frame *hybiFrameReader) PayloadType() byte { return frame.header.OpCode }
+
+func (frame *hybiFrameReader) HeaderReader() io.Reader {
+ if frame.header.data == nil {
+ return nil
+ }
+ if frame.header.data.Len() == 0 {
+ return nil
+ }
+ return frame.header.data
+}
+
+func (frame *hybiFrameReader) TrailerReader() io.Reader { return nil }
+
+func (frame *hybiFrameReader) Len() (n int) { return frame.length }
+
+// A hybiFrameReaderFactory creates a new frame reader based on its frame type.
+type hybiFrameReaderFactory struct {
+ *bufio.Reader
+}
+
+// NewFrameReader reads a frame header from the connection, and creates new reader for the frame.
+// See Section 5.2 Base Framing protocol for detail.
+// http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17#section-5.2
+func (buf hybiFrameReaderFactory) NewFrameReader() (frame frameReader, err error) {
+ hybiFrame := new(hybiFrameReader)
+ frame = hybiFrame
+ var header []byte
+ var b byte
+ // First byte. FIN/RSV1/RSV2/RSV3/OpCode(4bits)
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ hybiFrame.header.Fin = ((header[0] >> 7) & 1) != 0
+ for i := 0; i < 3; i++ {
+ j := uint(6 - i)
+ hybiFrame.header.Rsv[i] = ((header[0] >> j) & 1) != 0
+ }
+ hybiFrame.header.OpCode = header[0] & 0x0f
+
+ // Second byte. Mask/Payload len(7bits)
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ mask := (b & 0x80) != 0
+ b &= 0x7f
+ lengthFields := 0
+ switch {
+ case b <= 125: // Payload length 7bits.
+ hybiFrame.header.Length = int64(b)
+ case b == 126: // Payload length 7+16bits
+ lengthFields = 2
+ case b == 127: // Payload length 7+64bits
+ lengthFields = 8
+ }
+ for i := 0; i < lengthFields; i++ {
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ if lengthFields == 8 && i == 0 { // MSB must be zero when 7+64 bits
+ b &= 0x7f
+ }
+ header = append(header, b)
+ hybiFrame.header.Length = hybiFrame.header.Length*256 + int64(b)
+ }
+ if mask {
+ // Masking key. 4 bytes.
+ for i := 0; i < 4; i++ {
+ b, err = buf.ReadByte()
+ if err != nil {
+ return
+ }
+ header = append(header, b)
+ hybiFrame.header.MaskingKey = append(hybiFrame.header.MaskingKey, b)
+ }
+ }
+ hybiFrame.reader = io.LimitReader(buf.Reader, hybiFrame.header.Length)
+ hybiFrame.header.data = bytes.NewBuffer(header)
+ hybiFrame.length = len(header) + int(hybiFrame.header.Length)
+ return
+}
+
+// A hybiFrameWriter is a writer for a hybi frame.
+type hybiFrameWriter struct {
+ writer *bufio.Writer
+
+ header *hybiFrameHeader
+}
+
+func (frame *hybiFrameWriter) Write(msg []byte) (n int, err error) {
+ var header []byte
+ var b byte
+ if frame.header.Fin {
+ b |= 0x80
+ }
+ for i := 0; i < 3; i++ {
+ if frame.header.Rsv[i] {
+ j := uint(6 - i)
+ b |= 1 << j
+ }
+ }
+ b |= frame.header.OpCode
+ header = append(header, b)
+ if frame.header.MaskingKey != nil {
+ b = 0x80
+ } else {
+ b = 0
+ }
+ lengthFields := 0
+ length := len(msg)
+ switch {
+ case length <= 125:
+ b |= byte(length)
+ case length < 65536:
+ b |= 126
+ lengthFields = 2
+ default:
+ b |= 127
+ lengthFields = 8
+ }
+ header = append(header, b)
+ for i := 0; i < lengthFields; i++ {
+ j := uint((lengthFields - i - 1) * 8)
+ b = byte((length >> j) & 0xff)
+ header = append(header, b)
+ }
+ if frame.header.MaskingKey != nil {
+ if len(frame.header.MaskingKey) != 4 {
+ return 0, ErrBadMaskingKey
+ }
+ header = append(header, frame.header.MaskingKey...)
+ frame.writer.Write(header)
+ data := make([]byte, length)
+ for i := range data {
+ data[i] = msg[i] ^ frame.header.MaskingKey[i%4]
+ }
+ frame.writer.Write(data)
+ err = frame.writer.Flush()
+ return length, err
+ }
+ frame.writer.Write(header)
+ frame.writer.Write(msg)
+ err = frame.writer.Flush()
+ return length, err
+}
+
+func (frame *hybiFrameWriter) Close() error { return nil }
+
+type hybiFrameWriterFactory struct {
+ *bufio.Writer
+ needMaskingKey bool
+}
+
+func (buf hybiFrameWriterFactory) NewFrameWriter(payloadType byte) (frame frameWriter, err error) {
+ frameHeader := &hybiFrameHeader{Fin: true, OpCode: payloadType}
+ if buf.needMaskingKey {
+ frameHeader.MaskingKey, err = generateMaskingKey()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &hybiFrameWriter{writer: buf.Writer, header: frameHeader}, nil
+}
+
+type hybiFrameHandler struct {
+ conn *Conn
+ payloadType byte
+}
+
+func (handler *hybiFrameHandler) HandleFrame(frame frameReader) (frameReader, error) {
+ if handler.conn.IsServerConn() {
+ // The client MUST mask all frames sent to the server.
+ if frame.(*hybiFrameReader).header.MaskingKey == nil {
+ handler.WriteClose(closeStatusProtocolError)
+ return nil, io.EOF
+ }
+ } else {
+ // The server MUST NOT mask any frames.
+ if frame.(*hybiFrameReader).header.MaskingKey != nil {
+ handler.WriteClose(closeStatusProtocolError)
+ return nil, io.EOF
+ }
+ }
+ if header := frame.HeaderReader(); header != nil {
+ io.Copy(io.Discard, header)
+ }
+ switch frame.PayloadType() {
+ case ContinuationFrame:
+ frame.(*hybiFrameReader).header.OpCode = handler.payloadType
+ case TextFrame, BinaryFrame:
+ handler.payloadType = frame.PayloadType()
+ case CloseFrame:
+ return nil, io.EOF
+ case PingFrame, PongFrame:
+ b := make([]byte, maxControlFramePayloadLength)
+ n, err := io.ReadFull(frame, b)
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return nil, err
+ }
+ io.Copy(io.Discard, frame)
+ if frame.PayloadType() == PingFrame {
+ if _, err := handler.WritePong(b[:n]); err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ }
+ return frame, nil
+}
+
+func (handler *hybiFrameHandler) WriteClose(status int) (err error) {
+ handler.conn.wio.Lock()
+ defer handler.conn.wio.Unlock()
+ w, err := handler.conn.frameWriterFactory.NewFrameWriter(CloseFrame)
+ if err != nil {
+ return err
+ }
+ msg := make([]byte, 2)
+ binary.BigEndian.PutUint16(msg, uint16(status))
+ _, err = w.Write(msg)
+ w.Close()
+ return err
+}
+
+func (handler *hybiFrameHandler) WritePong(msg []byte) (n int, err error) {
+ handler.conn.wio.Lock()
+ defer handler.conn.wio.Unlock()
+ w, err := handler.conn.frameWriterFactory.NewFrameWriter(PongFrame)
+ if err != nil {
+ return 0, err
+ }
+ n, err = w.Write(msg)
+ w.Close()
+ return n, err
+}
+
+// newHybiConn creates a new WebSocket connection speaking hybi draft protocol.
+func newHybiConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ if buf == nil {
+ br := bufio.NewReader(rwc)
+ bw := bufio.NewWriter(rwc)
+ buf = bufio.NewReadWriter(br, bw)
+ }
+ ws := &Conn{config: config, request: request, buf: buf, rwc: rwc,
+ frameReaderFactory: hybiFrameReaderFactory{buf.Reader},
+ frameWriterFactory: hybiFrameWriterFactory{
+ buf.Writer, request == nil},
+ PayloadType: TextFrame,
+ defaultCloseStatus: closeStatusNormal}
+ ws.frameHandler = &hybiFrameHandler{conn: ws}
+ return ws
+}
+
+// generateMaskingKey generates a masking key for a frame.
+func generateMaskingKey() (maskingKey []byte, err error) {
+ maskingKey = make([]byte, 4)
+ if _, err = io.ReadFull(rand.Reader, maskingKey); err != nil {
+ return
+ }
+ return
+}
+
+// generateNonce generates a nonce consisting of a randomly selected 16-byte
+// value that has been base64-encoded.
+func generateNonce() (nonce []byte) {
+ key := make([]byte, 16)
+ if _, err := io.ReadFull(rand.Reader, key); err != nil {
+ panic(err)
+ }
+ nonce = make([]byte, 24)
+ base64.StdEncoding.Encode(nonce, key)
+ return
+}
+
+// removeZone removes IPv6 zone identifier from host.
+// E.g., "[fe80::1%en0]:8080" to "[fe80::1]:8080"
+func removeZone(host string) string {
+ if !strings.HasPrefix(host, "[") {
+ return host
+ }
+ i := strings.LastIndex(host, "]")
+ if i < 0 {
+ return host
+ }
+ j := strings.LastIndex(host[:i], "%")
+ if j < 0 {
+ return host
+ }
+ return host[:j] + host[i:]
+}
+
+// getNonceAccept computes the base64-encoded SHA-1 of the concatenation of
+// the nonce ("Sec-WebSocket-Key" value) with the websocket GUID string.
+func getNonceAccept(nonce []byte) (expected []byte, err error) {
+ h := sha1.New()
+ if _, err = h.Write(nonce); err != nil {
+ return
+ }
+ if _, err = h.Write([]byte(websocketGUID)); err != nil {
+ return
+ }
+ expected = make([]byte, 28)
+ base64.StdEncoding.Encode(expected, h.Sum(nil))
+ return
+}
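
This computation matches the worked example in RFC 6455 section 1.3: for the sample client key "dGhlIHNhbXBsZSBub25jZQ==", the expected accept value is "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=". A standalone sketch of the same calculation:

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

const guid = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"

func main() {
	key := "dGhlIHNhbXBsZSBub25jZQ=="

	// SHA-1 of the key concatenated with the fixed WebSocket GUID,
	// then base64-encoded, exactly as getNonceAccept does above.
	h := sha1.New()
	h.Write([]byte(key + guid))
	accept := base64.StdEncoding.EncodeToString(h.Sum(nil))

	fmt.Println(accept) // s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
}
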
+
+// Client handshake described in draft-ietf-hybi-thewebsocketprotocol-17
+func hybiClientHandshake(config *Config, br *bufio.Reader, bw *bufio.Writer) (err error) {
+ bw.WriteString("GET " + config.Location.RequestURI() + " HTTP/1.1\r\n")
+
+ // According to RFC 6874, an HTTP client, proxy, or other
+ // intermediary must remove any IPv6 zone identifier attached
+ // to an outgoing URI.
+ bw.WriteString("Host: " + removeZone(config.Location.Host) + "\r\n")
+ bw.WriteString("Upgrade: websocket\r\n")
+ bw.WriteString("Connection: Upgrade\r\n")
+ nonce := generateNonce()
+ if config.handshakeData != nil {
+ nonce = []byte(config.handshakeData["key"])
+ }
+ bw.WriteString("Sec-WebSocket-Key: " + string(nonce) + "\r\n")
+ bw.WriteString("Origin: " + strings.ToLower(config.Origin.String()) + "\r\n")
+
+ if config.Version != ProtocolVersionHybi13 {
+ return ErrBadProtocolVersion
+ }
+
+ bw.WriteString("Sec-WebSocket-Version: " + fmt.Sprintf("%d", config.Version) + "\r\n")
+ if len(config.Protocol) > 0 {
+ bw.WriteString("Sec-WebSocket-Protocol: " + strings.Join(config.Protocol, ", ") + "\r\n")
+ }
+ // TODO(ukai): send Sec-WebSocket-Extensions.
+ err = config.Header.WriteSubset(bw, handshakeHeader)
+ if err != nil {
+ return err
+ }
+
+ bw.WriteString("\r\n")
+ if err = bw.Flush(); err != nil {
+ return err
+ }
+
+ resp, err := http.ReadResponse(br, &http.Request{Method: "GET"})
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != 101 {
+ return ErrBadStatus
+ }
+ if strings.ToLower(resp.Header.Get("Upgrade")) != "websocket" ||
+ strings.ToLower(resp.Header.Get("Connection")) != "upgrade" {
+ return ErrBadUpgrade
+ }
+ expectedAccept, err := getNonceAccept(nonce)
+ if err != nil {
+ return err
+ }
+ if resp.Header.Get("Sec-WebSocket-Accept") != string(expectedAccept) {
+ return ErrChallengeResponse
+ }
+ if resp.Header.Get("Sec-WebSocket-Extensions") != "" {
+ return ErrUnsupportedExtensions
+ }
+ offeredProtocol := resp.Header.Get("Sec-WebSocket-Protocol")
+ if offeredProtocol != "" {
+ protocolMatched := false
+ for i := 0; i < len(config.Protocol); i++ {
+ if config.Protocol[i] == offeredProtocol {
+ protocolMatched = true
+ break
+ }
+ }
+ if !protocolMatched {
+ return ErrBadWebSocketProtocol
+ }
+ config.Protocol = []string{offeredProtocol}
+ }
+
+ return nil
+}
+
+// newHybiClientConn creates a client WebSocket connection after handshake.
+func newHybiClientConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser) *Conn {
+ return newHybiConn(config, buf, rwc, nil)
+}
+
+// A hybiServerHandshaker performs a server handshake using the hybi draft protocol.
+type hybiServerHandshaker struct {
+ *Config
+ accept []byte
+}
+
+func (c *hybiServerHandshaker) ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error) {
+ c.Version = ProtocolVersionHybi13
+ if req.Method != "GET" {
+ return http.StatusMethodNotAllowed, ErrBadRequestMethod
+ }
+ // HTTP version can be safely ignored.
+
+ if strings.ToLower(req.Header.Get("Upgrade")) != "websocket" ||
+ !strings.Contains(strings.ToLower(req.Header.Get("Connection")), "upgrade") {
+ return http.StatusBadRequest, ErrNotWebSocket
+ }
+
+ key := req.Header.Get("Sec-Websocket-Key")
+ if key == "" {
+ return http.StatusBadRequest, ErrChallengeResponse
+ }
+ version := req.Header.Get("Sec-Websocket-Version")
+ switch version {
+ case "13":
+ c.Version = ProtocolVersionHybi13
+ default:
+ return http.StatusBadRequest, ErrBadWebSocketVersion
+ }
+ var scheme string
+ if req.TLS != nil {
+ scheme = "wss"
+ } else {
+ scheme = "ws"
+ }
+ c.Location, err = url.ParseRequestURI(scheme + "://" + req.Host + req.URL.RequestURI())
+ if err != nil {
+ return http.StatusBadRequest, err
+ }
+ protocol := strings.TrimSpace(req.Header.Get("Sec-Websocket-Protocol"))
+ if protocol != "" {
+ protocols := strings.Split(protocol, ",")
+ for i := 0; i < len(protocols); i++ {
+ c.Protocol = append(c.Protocol, strings.TrimSpace(protocols[i]))
+ }
+ }
+ c.accept, err = getNonceAccept([]byte(key))
+ if err != nil {
+ return http.StatusInternalServerError, err
+ }
+ return http.StatusSwitchingProtocols, nil
+}
+
+// Origin parses the Origin header in req.
+// If the Origin header is not set, it returns nil and nil.
+func Origin(config *Config, req *http.Request) (*url.URL, error) {
+ var origin string
+ switch config.Version {
+ case ProtocolVersionHybi13:
+ origin = req.Header.Get("Origin")
+ }
+ if origin == "" {
+ return nil, nil
+ }
+ return url.ParseRequestURI(origin)
+}
+
+func (c *hybiServerHandshaker) AcceptHandshake(buf *bufio.Writer) (err error) {
+ if len(c.Protocol) > 0 {
+ if len(c.Protocol) != 1 {
+ // You need to choose a Protocol in the Server's Handshake func.
+ return ErrBadWebSocketProtocol
+ }
+ }
+ buf.WriteString("HTTP/1.1 101 Switching Protocols\r\n")
+ buf.WriteString("Upgrade: websocket\r\n")
+ buf.WriteString("Connection: Upgrade\r\n")
+ buf.WriteString("Sec-WebSocket-Accept: " + string(c.accept) + "\r\n")
+ if len(c.Protocol) > 0 {
+ buf.WriteString("Sec-WebSocket-Protocol: " + c.Protocol[0] + "\r\n")
+ }
+ // TODO(ukai): send Sec-WebSocket-Extensions.
+ if c.Header != nil {
+ err := c.Header.WriteSubset(buf, handshakeHeader)
+ if err != nil {
+ return err
+ }
+ }
+ buf.WriteString("\r\n")
+ return buf.Flush()
+}
+
+func (c *hybiServerHandshaker) NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ return newHybiServerConn(c.Config, buf, rwc, request)
+}
+
+// newHybiServerConn returns a new WebSocket connection speaking hybi draft protocol.
+func newHybiServerConn(config *Config, buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) *Conn {
+ return newHybiConn(config, buf, rwc, request)
+}
diff --git a/vendor/golang.org/x/net/websocket/server.go b/vendor/golang.org/x/net/websocket/server.go
new file mode 100644
index 000000000..0895dea19
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/server.go
@@ -0,0 +1,113 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net/http"
+)
+
+func newServerConn(rwc io.ReadWriteCloser, buf *bufio.ReadWriter, req *http.Request, config *Config, handshake func(*Config, *http.Request) error) (conn *Conn, err error) {
+ var hs serverHandshaker = &hybiServerHandshaker{Config: config}
+ code, err := hs.ReadHandshake(buf.Reader, req)
+ if err == ErrBadWebSocketVersion {
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ fmt.Fprintf(buf, "Sec-WebSocket-Version: %s\r\n", SupportedProtocolVersion)
+ buf.WriteString("\r\n")
+ buf.WriteString(err.Error())
+ buf.Flush()
+ return
+ }
+ if err != nil {
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ buf.WriteString("\r\n")
+ buf.WriteString(err.Error())
+ buf.Flush()
+ return
+ }
+ if handshake != nil {
+ err = handshake(config, req)
+ if err != nil {
+ code = http.StatusForbidden
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ buf.WriteString("\r\n")
+ buf.Flush()
+ return
+ }
+ }
+ err = hs.AcceptHandshake(buf.Writer)
+ if err != nil {
+ code = http.StatusBadRequest
+ fmt.Fprintf(buf, "HTTP/1.1 %03d %s\r\n", code, http.StatusText(code))
+ buf.WriteString("\r\n")
+ buf.Flush()
+ return
+ }
+ conn = hs.NewServerConn(buf, rwc, req)
+ return
+}
+
+// Server represents a WebSocket server.
+type Server struct {
+ // Config is a WebSocket configuration for new WebSocket connection.
+ Config
+
+ // Handshake is an optional function run during the WebSocket handshake.
+ // For example, you can use it to check (or skip checking) the Origin header,
+ // or to select config.Protocol.
+ Handshake func(*Config, *http.Request) error
+
+ // Handler handles a WebSocket connection.
+ Handler
+}
+
+// ServeHTTP implements the http.Handler interface for a WebSocket
+func (s Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ s.serveWebSocket(w, req)
+}
+
+func (s Server) serveWebSocket(w http.ResponseWriter, req *http.Request) {
+ rwc, buf, err := w.(http.Hijacker).Hijack()
+ if err != nil {
+ panic("Hijack failed: " + err.Error())
+ }
+ // The server should abort the WebSocket connection if it finds
+ // the client did not send a handshake that matches the protocol
+ // specification.
+ defer rwc.Close()
+ conn, err := newServerConn(rwc, buf, req, &s.Config, s.Handshake)
+ if err != nil {
+ return
+ }
+ if conn == nil {
+ panic("unexpected nil conn")
+ }
+ s.Handler(conn)
+}
+
+// Handler is a simple interface to a WebSocket browser client.
+// By default it checks that the Origin header is a valid URL.
+// You might want to verify websocket.Conn.Config().Origin in the func.
+// If you use Server instead of Handler, you could call websocket.Origin and
+// check the origin in your Handshake func. So, if you want to accept
+// non-browser clients, which do not send an Origin header, set a
+// Server.Handshake that does not check the origin.
+type Handler func(*Conn)
+
+func checkOrigin(config *Config, req *http.Request) (err error) {
+ config.Origin, err = Origin(config, req)
+ if err == nil && config.Origin == nil {
+ return fmt.Errorf("null origin")
+ }
+ return err
+}
+
+// ServeHTTP implements the http.Handler interface for a WebSocket
+func (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ s := Server{Handler: h, Handshake: checkOrigin}
+ s.serveWebSocket(w, req)
+}
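
The usual way to serve this handler is the echo server from the package documentation; a minimal sketch (port and path are placeholders):

package main

import (
	"io"
	"log"
	"net/http"

	"golang.org/x/net/websocket"
)

// echoServer copies every frame it receives straight back to the client.
func echoServer(ws *websocket.Conn) {
	io.Copy(ws, ws)
}

func main() {
	// websocket.Handler wires checkOrigin in as the Handshake func, as shown above.
	http.Handle("/echo", websocket.Handler(echoServer))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
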
diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go
new file mode 100644
index 000000000..ac76165ce
--- /dev/null
+++ b/vendor/golang.org/x/net/websocket/websocket.go
@@ -0,0 +1,448 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements a client and server for the WebSocket protocol
+// as specified in RFC 6455.
+//
+// This package currently lacks some features found in an alternative
+// and more actively maintained WebSocket package:
+//
+// https://pkg.go.dev/github.com/coder/websocket
+package websocket // import "golang.org/x/net/websocket"
+
+import (
+ "bufio"
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "sync"
+ "time"
+)
+
+const (
+ ProtocolVersionHybi13 = 13
+ ProtocolVersionHybi = ProtocolVersionHybi13
+ SupportedProtocolVersion = "13"
+
+ ContinuationFrame = 0
+ TextFrame = 1
+ BinaryFrame = 2
+ CloseFrame = 8
+ PingFrame = 9
+ PongFrame = 10
+ UnknownFrame = 255
+
+ DefaultMaxPayloadBytes = 32 << 20 // 32MB
+)
+
+// ProtocolError represents WebSocket protocol errors.
+type ProtocolError struct {
+ ErrorString string
+}
+
+func (err *ProtocolError) Error() string { return err.ErrorString }
+
+var (
+ ErrBadProtocolVersion = &ProtocolError{"bad protocol version"}
+ ErrBadScheme = &ProtocolError{"bad scheme"}
+ ErrBadStatus = &ProtocolError{"bad status"}
+ ErrBadUpgrade = &ProtocolError{"missing or bad upgrade"}
+ ErrBadWebSocketOrigin = &ProtocolError{"missing or bad WebSocket-Origin"}
+ ErrBadWebSocketLocation = &ProtocolError{"missing or bad WebSocket-Location"}
+ ErrBadWebSocketProtocol = &ProtocolError{"missing or bad WebSocket-Protocol"}
+ ErrBadWebSocketVersion = &ProtocolError{"missing or bad WebSocket Version"}
+ ErrChallengeResponse = &ProtocolError{"mismatch challenge/response"}
+ ErrBadFrame = &ProtocolError{"bad frame"}
+ ErrBadFrameBoundary = &ProtocolError{"not on frame boundary"}
+ ErrNotWebSocket = &ProtocolError{"not websocket protocol"}
+ ErrBadRequestMethod = &ProtocolError{"bad method"}
+ ErrNotSupported = &ProtocolError{"not supported"}
+)
+
+// ErrFrameTooLarge is returned by Codec's Receive method if payload size
+// exceeds limit set by Conn.MaxPayloadBytes
+var ErrFrameTooLarge = errors.New("websocket: frame payload size exceeds limit")
+
+// Addr is an implementation of net.Addr for WebSocket.
+type Addr struct {
+ *url.URL
+}
+
+// Network returns the network type for a WebSocket, "websocket".
+func (addr *Addr) Network() string { return "websocket" }
+
+// Config is a WebSocket configuration
+type Config struct {
+ // A WebSocket server address.
+ Location *url.URL
+
+ // A Websocket client origin.
+ Origin *url.URL
+
+ // WebSocket subprotocols.
+ Protocol []string
+
+ // WebSocket protocol version.
+ Version int
+
+ // TLS config for secure WebSocket (wss).
+ TlsConfig *tls.Config
+
+ // Additional header fields to be sent in WebSocket opening handshake.
+ Header http.Header
+
+ // Dialer used when opening websocket connections.
+ Dialer *net.Dialer
+
+ handshakeData map[string]string
+}
+
+// serverHandshaker is an interface to handle WebSocket server side handshake.
+type serverHandshaker interface {
+ // ReadHandshake reads handshake request message from client.
+ // Returns http response code and error if any.
+ ReadHandshake(buf *bufio.Reader, req *http.Request) (code int, err error)
+
+ // AcceptHandshake accepts the client handshake request and sends
+ // handshake response back to client.
+ AcceptHandshake(buf *bufio.Writer) (err error)
+
+ // NewServerConn creates a new WebSocket connection.
+ NewServerConn(buf *bufio.ReadWriter, rwc io.ReadWriteCloser, request *http.Request) (conn *Conn)
+}
+
+// frameReader is an interface to read a WebSocket frame.
+type frameReader interface {
+ // Reader is to read payload of the frame.
+ io.Reader
+
+ // PayloadType returns payload type.
+ PayloadType() byte
+
+ // HeaderReader returns a reader to read header of the frame.
+ HeaderReader() io.Reader
+
+ // TrailerReader returns a reader to read trailer of the frame.
+ // If it returns nil, there is no trailer in the frame.
+ TrailerReader() io.Reader
+
+ // Len returns total length of the frame, including header and trailer.
+ Len() int
+}
+
+// frameReaderFactory is an interface to create new frame readers.
+type frameReaderFactory interface {
+ NewFrameReader() (r frameReader, err error)
+}
+
+// frameWriter is an interface to write a WebSocket frame.
+type frameWriter interface {
+ // Writer is to write payload of the frame.
+ io.WriteCloser
+}
+
+// frameWriterFactory is an interface to create new frame writer.
+type frameWriterFactory interface {
+ NewFrameWriter(payloadType byte) (w frameWriter, err error)
+}
+
+type frameHandler interface {
+ HandleFrame(frame frameReader) (r frameReader, err error)
+ WriteClose(status int) (err error)
+}
+
+// Conn represents a WebSocket connection.
+//
+// Multiple goroutines may invoke methods on a Conn simultaneously.
+type Conn struct {
+ config *Config
+ request *http.Request
+
+ buf *bufio.ReadWriter
+ rwc io.ReadWriteCloser
+
+ rio sync.Mutex
+ frameReaderFactory
+ frameReader
+
+ wio sync.Mutex
+ frameWriterFactory
+
+ frameHandler
+ PayloadType byte
+ defaultCloseStatus int
+
+ // MaxPayloadBytes limits the size of frame payload received over Conn
+ // by Codec's Receive method. If zero, DefaultMaxPayloadBytes is used.
+ MaxPayloadBytes int
+}
+
+// Read implements the io.Reader interface:
+// it reads data of a frame from the WebSocket connection.
+// If msg is not large enough for the frame data, it fills msg and the next Read
+// will read the rest of the frame data.
+// It reads Text frames and Binary frames.
+func (ws *Conn) Read(msg []byte) (n int, err error) {
+ ws.rio.Lock()
+ defer ws.rio.Unlock()
+again:
+ if ws.frameReader == nil {
+ frame, err := ws.frameReaderFactory.NewFrameReader()
+ if err != nil {
+ return 0, err
+ }
+ ws.frameReader, err = ws.frameHandler.HandleFrame(frame)
+ if err != nil {
+ return 0, err
+ }
+ if ws.frameReader == nil {
+ goto again
+ }
+ }
+ n, err = ws.frameReader.Read(msg)
+ if err == io.EOF {
+ if trailer := ws.frameReader.TrailerReader(); trailer != nil {
+ io.Copy(io.Discard, trailer)
+ }
+ ws.frameReader = nil
+ goto again
+ }
+ return n, err
+}
+
+// Write implements the io.Writer interface:
+// it writes data as a frame to the WebSocket connection.
+func (ws *Conn) Write(msg []byte) (n int, err error) {
+ ws.wio.Lock()
+ defer ws.wio.Unlock()
+ w, err := ws.frameWriterFactory.NewFrameWriter(ws.PayloadType)
+ if err != nil {
+ return 0, err
+ }
+ n, err = w.Write(msg)
+ w.Close()
+ return n, err
+}
+
+// Close implements the io.Closer interface.
+func (ws *Conn) Close() error {
+ err := ws.frameHandler.WriteClose(ws.defaultCloseStatus)
+ err1 := ws.rwc.Close()
+ if err != nil {
+ return err
+ }
+ return err1
+}
+
+// IsClientConn reports whether ws is a client-side connection.
+func (ws *Conn) IsClientConn() bool { return ws.request == nil }
+
+// IsServerConn reports whether ws is a server-side connection.
+func (ws *Conn) IsServerConn() bool { return ws.request != nil }
+
+// LocalAddr returns the WebSocket Origin for the connection for client, or
+// the WebSocket location for server.
+func (ws *Conn) LocalAddr() net.Addr {
+ if ws.IsClientConn() {
+ return &Addr{ws.config.Origin}
+ }
+ return &Addr{ws.config.Location}
+}
+
+// RemoteAddr returns the WebSocket location for the connection for client, or
+// the Websocket Origin for server.
+func (ws *Conn) RemoteAddr() net.Addr {
+ if ws.IsClientConn() {
+ return &Addr{ws.config.Location}
+ }
+ return &Addr{ws.config.Origin}
+}
+
+var errSetDeadline = errors.New("websocket: cannot set deadline: not using a net.Conn")
+
+// SetDeadline sets the connection's network read & write deadlines.
+func (ws *Conn) SetDeadline(t time.Time) error {
+ if conn, ok := ws.rwc.(net.Conn); ok {
+ return conn.SetDeadline(t)
+ }
+ return errSetDeadline
+}
+
+// SetReadDeadline sets the connection's network read deadline.
+func (ws *Conn) SetReadDeadline(t time.Time) error {
+ if conn, ok := ws.rwc.(net.Conn); ok {
+ return conn.SetReadDeadline(t)
+ }
+ return errSetDeadline
+}
+
+// SetWriteDeadline sets the connection's network write deadline.
+func (ws *Conn) SetWriteDeadline(t time.Time) error {
+ if conn, ok := ws.rwc.(net.Conn); ok {
+ return conn.SetWriteDeadline(t)
+ }
+ return errSetDeadline
+}
+
+// Config returns the WebSocket config.
+func (ws *Conn) Config() *Config { return ws.config }
+
+// Request returns the http request upgraded to the WebSocket.
+// It is nil for client side.
+func (ws *Conn) Request() *http.Request { return ws.request }
+
+// Codec represents a symmetric pair of functions that implement a codec.
+type Codec struct {
+ Marshal func(v interface{}) (data []byte, payloadType byte, err error)
+ Unmarshal func(data []byte, payloadType byte, v interface{}) (err error)
+}
+
+// Send sends v marshaled by cd.Marshal as single frame to ws.
+func (cd Codec) Send(ws *Conn, v interface{}) (err error) {
+ data, payloadType, err := cd.Marshal(v)
+ if err != nil {
+ return err
+ }
+ ws.wio.Lock()
+ defer ws.wio.Unlock()
+ w, err := ws.frameWriterFactory.NewFrameWriter(payloadType)
+ if err != nil {
+ return err
+ }
+ _, err = w.Write(data)
+ w.Close()
+ return err
+}
+
+// Receive receives a single frame from ws, unmarshals it with cd.Unmarshal and
+// stores the result in v. The whole frame payload is read into an in-memory
+// buffer; the maximum payload size is defined by ws.MaxPayloadBytes. If the
+// frame payload exceeds that limit, ErrFrameTooLarge is returned; in this case
+// the frame is not read off the wire completely. The next call to Receive will
+// read and discard the leftover data of the previous oversized frame before
+// processing the next frame.
+func (cd Codec) Receive(ws *Conn, v interface{}) (err error) {
+ ws.rio.Lock()
+ defer ws.rio.Unlock()
+ if ws.frameReader != nil {
+ _, err = io.Copy(io.Discard, ws.frameReader)
+ if err != nil {
+ return err
+ }
+ ws.frameReader = nil
+ }
+again:
+ frame, err := ws.frameReaderFactory.NewFrameReader()
+ if err != nil {
+ return err
+ }
+ frame, err = ws.frameHandler.HandleFrame(frame)
+ if err != nil {
+ return err
+ }
+ if frame == nil {
+ goto again
+ }
+ maxPayloadBytes := ws.MaxPayloadBytes
+ if maxPayloadBytes == 0 {
+ maxPayloadBytes = DefaultMaxPayloadBytes
+ }
+ if hf, ok := frame.(*hybiFrameReader); ok && hf.header.Length > int64(maxPayloadBytes) {
+ // payload size exceeds limit, no need to call Unmarshal
+ //
+ // set frameReader to current oversized frame so that
+ // the next call to this function can drain leftover
+ // data before processing the next frame
+ ws.frameReader = frame
+ return ErrFrameTooLarge
+ }
+ payloadType := frame.PayloadType()
+ data, err := io.ReadAll(frame)
+ if err != nil {
+ return err
+ }
+ return cd.Unmarshal(data, payloadType, v)
+}
+
+func marshal(v interface{}) (msg []byte, payloadType byte, err error) {
+ switch data := v.(type) {
+ case string:
+ return []byte(data), TextFrame, nil
+ case []byte:
+ return data, BinaryFrame, nil
+ }
+ return nil, UnknownFrame, ErrNotSupported
+}
+
+func unmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
+ switch data := v.(type) {
+ case *string:
+ *data = string(msg)
+ return nil
+ case *[]byte:
+ *data = msg
+ return nil
+ }
+ return ErrNotSupported
+}
+
+/*
+Message is a codec to send/receive text/binary data in a frame on WebSocket connection.
+To send/receive text frame, use string type.
+To send/receive binary frame, use []byte type.
+
+Trivial usage:
+
+ import "websocket"
+
+ // receive text frame
+ var message string
+ websocket.Message.Receive(ws, &message)
+
+ // send text frame
+ message = "hello"
+ websocket.Message.Send(ws, message)
+
+ // receive binary frame
+ var data []byte
+ websocket.Message.Receive(ws, &data)
+
+ // send binary frame
+ data = []byte{0, 1, 2}
+ websocket.Message.Send(ws, data)
+*/
+var Message = Codec{marshal, unmarshal}
+
+func jsonMarshal(v interface{}) (msg []byte, payloadType byte, err error) {
+ msg, err = json.Marshal(v)
+ return msg, TextFrame, err
+}
+
+func jsonUnmarshal(msg []byte, payloadType byte, v interface{}) (err error) {
+ return json.Unmarshal(msg, v)
+}
+
+/*
+JSON is a codec to send/receive JSON data in a frame from a WebSocket connection.
+
+Trivial usage:
+
+ import "websocket"
+
+ type T struct {
+ Msg string
+ Count int
+ }
+
+ // receive JSON type T
+ var data T
+ websocket.JSON.Receive(ws, &data)
+
+ // send JSON type T
+ websocket.JSON.Send(ws, data)
+*/
+var JSON = Codec{jsonMarshal, jsonUnmarshal}
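The Handler and Server types in this new file compose directly with net/http. A minimal sketch (not part of the vendored file) of an echo endpoint that also accepts non-browser clients by supplying a permissive Handshake might look like this; the route and listen address are arbitrary:

```go
package main

import (
	"io"
	"log"
	"net/http"

	"golang.org/x/net/websocket"
)

func main() {
	// Echo every received frame back to the client.
	echo := websocket.Handler(func(ws *websocket.Conn) {
		io.Copy(ws, ws)
	})

	// A permissive Handshake accepts clients that omit the Origin header
	// (e.g. non-browser clients), as the Handler comment above describes.
	srv := websocket.Server{
		Handler:   echo,
		Handshake: func(cfg *websocket.Config, req *http.Request) error { return nil },
	}

	http.Handle("/echo", srv)
	log.Fatal(http.ListenAndServe(":8080", nil)) // address is arbitrary for this sketch
}
```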
diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE
index 6a66aea5e..2a7cf70da 100644
--- a/vendor/golang.org/x/oauth2/LICENSE
+++ b/vendor/golang.org/x/oauth2/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
index 781770c20..48dbb9d84 100644
--- a/vendor/golang.org/x/oauth2/README.md
+++ b/vendor/golang.org/x/oauth2/README.md
@@ -5,15 +5,6 @@
oauth2 package contains a client implementation for OAuth 2.0 spec.
-## Installation
-
-~~~~
-go get golang.org/x/oauth2
-~~~~
-
-Or you can manually git clone the repository to
-`$(go env GOPATH)/src/golang.org/x/oauth2`.
-
See pkg.go.dev for further documentation and examples.
* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2)
@@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at
https://github.com/golang/oauth2/issues.
This repository uses Gerrit for code changes. To learn how to submit changes to
-this repository, see https://golang.org/doc/contribute.html. In particular:
+this repository, see https://go.dev/doc/contribute.
+
+The git repository is https://go.googlesource.com/oauth2.
+
+Note:
* Excluding trivial changes, all contributions should be connected to an existing issue.
* API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted.
diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go
deleted file mode 100644
index d28140f78..000000000
--- a/vendor/golang.org/x/oauth2/internal/client_appengine.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build appengine
-
-package internal
-
-import "google.golang.org/appengine/urlfetch"
-
-func init() {
- appengineClientHook = urlfetch.Client
-}
diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go
index 572074a63..b9db01ddf 100644
--- a/vendor/golang.org/x/oauth2/internal/transport.go
+++ b/vendor/golang.org/x/oauth2/internal/transport.go
@@ -18,16 +18,11 @@ var HTTPClient ContextKey
// because nobody else can create a ContextKey, being unexported.
type ContextKey struct{}
-var appengineClientHook func(context.Context) *http.Client
-
func ContextClient(ctx context.Context) *http.Client {
if ctx != nil {
if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
return hc
}
}
- if appengineClientHook != nil {
- return appengineClientHook(ctx)
- }
return http.DefaultClient
}
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
index 90a2c3d6d..09f6a49b8 100644
--- a/vendor/golang.org/x/oauth2/oauth2.go
+++ b/vendor/golang.org/x/oauth2/oauth2.go
@@ -393,7 +393,7 @@ func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
}
}
-// ReuseTokenSource returns a TokenSource that acts in the same manner as the
+// ReuseTokenSourceWithExpiry returns a TokenSource that acts in the same manner as the
// TokenSource returned by ReuseTokenSource, except the expiry buffer is
// configurable. The expiration time of a token is calculated as
// t.Expiry.Add(-earlyExpiry).
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
index 5bbb33217..109997d77 100644
--- a/vendor/golang.org/x/oauth2/token.go
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -49,6 +49,13 @@ type Token struct {
// mechanisms for that TokenSource will not be used.
Expiry time.Time `json:"expiry,omitempty"`
+ // ExpiresIn is the OAuth2 wire format "expires_in" field,
+ // which specifies how many seconds later the token expires,
+ // relative to an unknown time base approximately around "now".
+ // It is the application's responsibility to populate
+ // `Expiry` from `ExpiresIn` when required.
+ ExpiresIn int64 `json:"expires_in,omitempty"`
+
// raw optionally contains extra metadata from the server
// when updating a token.
raw interface{}
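The new ExpiresIn field carries the raw wire value; per the comment above, deriving Expiry from it is left to the application. A hedged sketch of such a helper (populateExpiry is a hypothetical name, not part of the oauth2 API):

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

// populateExpiry fills Expiry from the wire-format ExpiresIn value when
// Expiry was not set. "now" is only an approximation of the server's time base.
func populateExpiry(tok *oauth2.Token, now time.Time) {
	if tok.ExpiresIn > 0 && tok.Expiry.IsZero() {
		tok.Expiry = now.Add(time.Duration(tok.ExpiresIn) * time.Second)
	}
}

func main() {
	tok := &oauth2.Token{AccessToken: "example-token", ExpiresIn: 3600}
	populateExpiry(tok, time.Now())
	fmt.Println(tok.Expiry)
}
```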
diff --git a/vendor/golang.org/x/sys/unix/ioctl_linux.go b/vendor/golang.org/x/sys/unix/ioctl_linux.go
index dbe680eab..7ca4fa12a 100644
--- a/vendor/golang.org/x/sys/unix/ioctl_linux.go
+++ b/vendor/golang.org/x/sys/unix/ioctl_linux.go
@@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) {
return &value, err
}
+// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC
+// association for the network device specified by ifname.
+func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) {
+ ifr, err := NewIfreq(ifname)
+ if err != nil {
+ return nil, err
+ }
+
+ value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO}
+ ifrd := ifr.withData(unsafe.Pointer(&value))
+
+ err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd)
+ return &value, err
+}
+
+// IoctlGetHwTstamp retrieves the hardware timestamping configuration
+// for the network device specified by ifname.
+func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) {
+ ifr, err := NewIfreq(ifname)
+ if err != nil {
+ return nil, err
+ }
+
+ value := HwTstampConfig{}
+ ifrd := ifr.withData(unsafe.Pointer(&value))
+
+ err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd)
+ return &value, err
+}
+
+// IoctlSetHwTstamp updates the hardware timestamping configuration for
+// the network device specified by ifname.
+func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error {
+ ifr, err := NewIfreq(ifname)
+ if err != nil {
+ return err
+ }
+ ifrd := ifr.withData(unsafe.Pointer(cfg))
+ return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd)
+}
+
+// FdToClockID derives the clock ID from the file descriptor number
+// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is
+// suitable for system calls like ClockGettime.
+func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) }
+
+// IoctlPtpClockGetcaps returns the description of a given PTP device.
+func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) {
+ var value PtpClockCaps
+ err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value))
+ return &value, err
+}
+
+// IoctlPtpSysOffsetPrecise returns a description of the clock
+// offset compared to the system clock.
+func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) {
+ var value PtpSysOffsetPrecise
+ err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value))
+ return &value, err
+}
+
+// IoctlPtpSysOffsetExtended returns an extended description of the
+// clock offset compared to the system clock. The samples parameter
+// specifies the desired number of measurements.
+func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) {
+ value := PtpSysOffsetExtended{Samples: uint32(samples)}
+ err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value))
+ return &value, err
+}
+
+// IoctlPtpPinGetfunc returns the configuration of the specified
+// I/O pin on given PTP device.
+func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) {
+ value := PtpPinDesc{Index: uint32(index)}
+ err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value))
+ return &value, err
+}
+
+// IoctlPtpPinSetfunc updates configuration of the specified PTP
+// I/O pin.
+func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error {
+ return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd))
+}
+
+// IoctlPtpPeroutRequest configures the periodic output mode of the
+// PTP I/O pins.
+func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error {
+ return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r))
+}
+
+// IoctlPtpExttsRequest configures the external timestamping mode
+// of the PTP I/O pins.
+func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error {
+ return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r))
+}
+
// IoctlGetWatchdogInfo fetches information about a watchdog device from the
// Linux watchdog API. For more information, see:
// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
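The PTP wrappers added in this hunk are thin ioctl helpers over a PTP character device. As a rough illustration only, they could be exercised as follows; /dev/ptp0 and the sample count of 5 are assumptions about the target system:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// /dev/ptp0 is an assumed device path; adjust for the system under test.
	fd, err := unix.Open("/dev/ptp0", unix.O_RDWR, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	caps, err := unix.IoctlPtpClockGetcaps(fd)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("pins=%d pps=%d max_adj=%d\n", caps.N_pins, caps.Pps, caps.Max_adj)

	// Sample the PHC-vs-system-clock offset a handful of times.
	off, err := unix.IoctlPtpSysOffsetExtended(fd, 5)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("samples returned:", off.Samples)
}
```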
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index ac54ecaba..6ab02b6c3 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -158,6 +158,16 @@ includes_Linux='
#endif
#define _GNU_SOURCE
+// See the description in unix/linux/types.go
+#if defined(__ARM_EABI__) || \
+ (defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \
+ (defined(__powerpc__) && (!defined(__powerpc64__)))
+# ifdef _TIME_BITS
+# undef _TIME_BITS
+# endif
+# define _TIME_BITS 32
+#endif
+
// is broken on powerpc64, as it fails to include definitions of
// these structures. We just include them copied from .
#if defined(__powerpc__)
@@ -256,6 +266,7 @@ struct ltchars {
#include
#include
#include
+#include
#include
#include
#include
@@ -527,6 +538,7 @@ ccflags="$@"
$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ ||
$2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ ||
$2 ~ /^NFC_.*_(MAX)?SIZE$/ ||
+ $2 ~ /^PTP_/ ||
$2 ~ /^RAW_PAYLOAD_/ ||
$2 ~ /^[US]F_/ ||
$2 ~ /^TP_STATUS_/ ||
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index f08abd434..230a94549 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1860,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sys ClockAdjtime(clockid int32, buf *Timex) (state int, err error)
//sys ClockGetres(clockid int32, res *Timespec) (err error)
//sys ClockGettime(clockid int32, time *Timespec) (err error)
+//sys ClockSettime(clockid int32, time *Timespec) (err error)
//sys ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error)
//sys Close(fd int) (err error)
//sys CloseRange(first uint, last uint, flags uint) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
index 312ae6ac1..7bf5c04bb 100644
--- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
@@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) {
return mapper.Munmap(b)
}
+func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) {
+ xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset)
+ return unsafe.Pointer(xaddr), err
+}
+
+func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) {
+ return mapper.munmap(uintptr(addr), length)
+}
+
//sys Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A
//sysnb Getgid() (gid int)
//sysnb Getpid() (pid int)
@@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) {
// for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/
func isSpecialPath(path []byte) (v bool) {
var special = [4][8]byte{
- [8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'},
- [8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'},
- [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'},
- [8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}}
+ {'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'},
+ {'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'},
+ {'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'},
+ {'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}}
var i, j int
for i = 0; i < len(special); i++ {
@@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) {
//sys Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT
//sys Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT
//sys Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT
+
+func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) {
+ runtime.EnterSyscall()
+ r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg)
+ runtime.ExitSyscall()
+ val = int(r0)
+ if int64(r0) == -1 {
+ err = errnoErr2(e1, e2)
+ }
+ return
+}
+
+func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) {
+ switch op.(type) {
+ case *Flock_t:
+ err = FcntlFlock(fd, cmd, op.(*Flock_t))
+ if err != nil {
+ ret = -1
+ }
+ return
+ case int:
+ return FcntlInt(fd, cmd, op.(int))
+ case *F_cnvrt:
+ return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt))))
+ case unsafe.Pointer:
+ return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer)))
+ default:
+ return -1, EINVAL
+ }
+ return
+}
+
+func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ return sendfile(outfd, infd, offset, count)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ // TODO: use LE call instead if the call is implemented
+ originalOffset, err := Seek(infd, 0, SEEK_CUR)
+ if err != nil {
+ return -1, err
+ }
+ //start reading data from in_fd
+ if offset != nil {
+ _, err := Seek(infd, *offset, SEEK_SET)
+ if err != nil {
+ return -1, err
+ }
+ }
+
+ buf := make([]byte, count)
+ readBuf := make([]byte, 0)
+ var n int = 0
+ for i := 0; i < count; i += n {
+ n, err := Read(infd, buf)
+ if n == 0 {
+ if err != nil {
+ return -1, err
+ } else { // EOF
+ break
+ }
+ }
+ readBuf = append(readBuf, buf...)
+ buf = buf[0:0]
+ }
+
+ n2, err := Write(outfd, readBuf)
+ if err != nil {
+ return -1, err
+ }
+
+ //When sendfile() returns, this variable will be set to the
+ // offset of the byte following the last byte that was read.
+ if offset != nil {
+ *offset = *offset + int64(n)
+ // If offset is not NULL, then sendfile() does not modify the file
+ // offset of in_fd
+ _, err := Seek(infd, originalOffset, SEEK_SET)
+ if err != nil {
+ return -1, err
+ }
+ }
+ return n2, nil
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index de3b46248..ccba391c9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -2625,6 +2625,28 @@ const (
PR_UNALIGN_NOPRINT = 0x1
PR_UNALIGN_SIGBUS = 0x2
PSTOREFS_MAGIC = 0x6165676c
+ PTP_CLK_MAGIC = '='
+ PTP_ENABLE_FEATURE = 0x1
+ PTP_EXTTS_EDGES = 0x6
+ PTP_EXTTS_EVENT_VALID = 0x1
+ PTP_EXTTS_V1_VALID_FLAGS = 0x7
+ PTP_EXTTS_VALID_FLAGS = 0x1f
+ PTP_EXT_OFFSET = 0x10
+ PTP_FALLING_EDGE = 0x4
+ PTP_MAX_SAMPLES = 0x19
+ PTP_PEROUT_DUTY_CYCLE = 0x2
+ PTP_PEROUT_ONE_SHOT = 0x1
+ PTP_PEROUT_PHASE = 0x4
+ PTP_PEROUT_V1_VALID_FLAGS = 0x0
+ PTP_PEROUT_VALID_FLAGS = 0x7
+ PTP_PIN_GETFUNC = 0xc0603d06
+ PTP_PIN_GETFUNC2 = 0xc0603d0f
+ PTP_RISING_EDGE = 0x2
+ PTP_STRICT_FLAGS = 0x8
+ PTP_SYS_OFFSET_EXTENDED = 0xc4c03d09
+ PTP_SYS_OFFSET_EXTENDED2 = 0xc4c03d12
+ PTP_SYS_OFFSET_PRECISE = 0xc0403d08
+ PTP_SYS_OFFSET_PRECISE2 = 0xc0403d11
PTRACE_ATTACH = 0x10
PTRACE_CONT = 0x7
PTRACE_DETACH = 0x11
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 8aa6d77c0..0c00cb3f3 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -237,6 +237,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x7434
PPPIOCXFERUNIT = 0x744e
PR_SET_PTRACER_ANY = 0xffffffff
+ PTP_CLOCK_GETCAPS = 0x80503d01
+ PTP_CLOCK_GETCAPS2 = 0x80503d0a
+ PTP_ENABLE_PPS = 0x40043d04
+ PTP_ENABLE_PPS2 = 0x40043d0d
+ PTP_EXTTS_REQUEST = 0x40103d02
+ PTP_EXTTS_REQUEST2 = 0x40103d0b
+ PTP_MASK_CLEAR_ALL = 0x3d13
+ PTP_MASK_EN_SINGLE = 0x40043d14
+ PTP_PEROUT_REQUEST = 0x40383d03
+ PTP_PEROUT_REQUEST2 = 0x40383d0c
+ PTP_PIN_SETFUNC = 0x40603d07
+ PTP_PIN_SETFUNC2 = 0x40603d10
+ PTP_SYS_OFFSET = 0x43403d05
+ PTP_SYS_OFFSET2 = 0x43403d0e
PTRACE_GETFPREGS = 0xe
PTRACE_GETFPXREGS = 0x12
PTRACE_GET_THREAD_AREA = 0x19
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index da428f425..dfb364554 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -237,6 +237,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x7434
PPPIOCXFERUNIT = 0x744e
PR_SET_PTRACER_ANY = 0xffffffffffffffff
+ PTP_CLOCK_GETCAPS = 0x80503d01
+ PTP_CLOCK_GETCAPS2 = 0x80503d0a
+ PTP_ENABLE_PPS = 0x40043d04
+ PTP_ENABLE_PPS2 = 0x40043d0d
+ PTP_EXTTS_REQUEST = 0x40103d02
+ PTP_EXTTS_REQUEST2 = 0x40103d0b
+ PTP_MASK_CLEAR_ALL = 0x3d13
+ PTP_MASK_EN_SINGLE = 0x40043d14
+ PTP_PEROUT_REQUEST = 0x40383d03
+ PTP_PEROUT_REQUEST2 = 0x40383d0c
+ PTP_PIN_SETFUNC = 0x40603d07
+ PTP_PIN_SETFUNC2 = 0x40603d10
+ PTP_SYS_OFFSET = 0x43403d05
+ PTP_SYS_OFFSET2 = 0x43403d0e
PTRACE_ARCH_PRCTL = 0x1e
PTRACE_GETFPREGS = 0xe
PTRACE_GETFPXREGS = 0x12
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index bf45bfec7..d46dcf78a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -234,6 +234,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x7434
PPPIOCXFERUNIT = 0x744e
PR_SET_PTRACER_ANY = 0xffffffff
+ PTP_CLOCK_GETCAPS = 0x80503d01
+ PTP_CLOCK_GETCAPS2 = 0x80503d0a
+ PTP_ENABLE_PPS = 0x40043d04
+ PTP_ENABLE_PPS2 = 0x40043d0d
+ PTP_EXTTS_REQUEST = 0x40103d02
+ PTP_EXTTS_REQUEST2 = 0x40103d0b
+ PTP_MASK_CLEAR_ALL = 0x3d13
+ PTP_MASK_EN_SINGLE = 0x40043d14
+ PTP_PEROUT_REQUEST = 0x40383d03
+ PTP_PEROUT_REQUEST2 = 0x40383d0c
+ PTP_PIN_SETFUNC = 0x40603d07
+ PTP_PIN_SETFUNC2 = 0x40603d10
+ PTP_SYS_OFFSET = 0x43403d05
+ PTP_SYS_OFFSET2 = 0x43403d0e
PTRACE_GETCRUNCHREGS = 0x19
PTRACE_GETFDPIC = 0x1f
PTRACE_GETFDPIC_EXEC = 0x0
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 71c67162b..3af3248a7 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -240,6 +240,20 @@ const (
PROT_BTI = 0x10
PROT_MTE = 0x20
PR_SET_PTRACER_ANY = 0xffffffffffffffff
+ PTP_CLOCK_GETCAPS = 0x80503d01
+ PTP_CLOCK_GETCAPS2 = 0x80503d0a
+ PTP_ENABLE_PPS = 0x40043d04
+ PTP_ENABLE_PPS2 = 0x40043d0d
+ PTP_EXTTS_REQUEST = 0x40103d02
+ PTP_EXTTS_REQUEST2 = 0x40103d0b
+ PTP_MASK_CLEAR_ALL = 0x3d13
+ PTP_MASK_EN_SINGLE = 0x40043d14
+ PTP_PEROUT_REQUEST = 0x40383d03
+ PTP_PEROUT_REQUEST2 = 0x40383d0c
+ PTP_PIN_SETFUNC = 0x40603d07
+ PTP_PIN_SETFUNC2 = 0x40603d10
+ PTP_SYS_OFFSET = 0x43403d05
+ PTP_SYS_OFFSET2 = 0x43403d0e
PTRACE_PEEKMTETAGS = 0x21
PTRACE_POKEMTETAGS = 0x22
PTRACE_SYSEMU = 0x1f
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 9476628fa..292bcf028 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -238,6 +238,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x7434
PPPIOCXFERUNIT = 0x744e
PR_SET_PTRACER_ANY = 0xffffffffffffffff
+ PTP_CLOCK_GETCAPS = 0x80503d01
+ PTP_CLOCK_GETCAPS2 = 0x80503d0a
+ PTP_ENABLE_PPS = 0x40043d04
+ PTP_ENABLE_PPS2 = 0x40043d0d
+ PTP_EXTTS_REQUEST = 0x40103d02
+ PTP_EXTTS_REQUEST2 = 0x40103d0b
+ PTP_MASK_CLEAR_ALL = 0x3d13
+ PTP_MASK_EN_SINGLE = 0x40043d14
+ PTP_PEROUT_REQUEST = 0x40383d03
+ PTP_PEROUT_REQUEST2 = 0x40383d0c
+ PTP_PIN_SETFUNC = 0x40603d07
+ PTP_PIN_SETFUNC2 = 0x40603d10
+ PTP_SYS_OFFSET = 0x43403d05
+ PTP_SYS_OFFSET2 = 0x43403d0e
PTRACE_SYSEMU = 0x1f
PTRACE_SYSEMU_SINGLESTEP = 0x20
RLIMIT_AS = 0x9
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index b9e85f3cf..782b7110f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -234,6 +234,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x20007434
PPPIOCXFERUNIT = 0x2000744e
PR_SET_PTRACER_ANY = 0xffffffff
+ PTP_CLOCK_GETCAPS = 0x40503d01
+ PTP_CLOCK_GETCAPS2 = 0x40503d0a
+ PTP_ENABLE_PPS = 0x80043d04
+ PTP_ENABLE_PPS2 = 0x80043d0d
+ PTP_EXTTS_REQUEST = 0x80103d02
+ PTP_EXTTS_REQUEST2 = 0x80103d0b
+ PTP_MASK_CLEAR_ALL = 0x20003d13
+ PTP_MASK_EN_SINGLE = 0x80043d14
+ PTP_PEROUT_REQUEST = 0x80383d03
+ PTP_PEROUT_REQUEST2 = 0x80383d0c
+ PTP_PIN_SETFUNC = 0x80603d07
+ PTP_PIN_SETFUNC2 = 0x80603d10
+ PTP_SYS_OFFSET = 0x83403d05
+ PTP_SYS_OFFSET2 = 0x83403d0e
PTRACE_GETFPREGS = 0xe
PTRACE_GET_THREAD_AREA = 0x19
PTRACE_GET_THREAD_AREA_3264 = 0xc4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index a48b68a76..84973fd92 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -234,6 +234,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x20007434
PPPIOCXFERUNIT = 0x2000744e
PR_SET_PTRACER_ANY = 0xffffffffffffffff
+ PTP_CLOCK_GETCAPS = 0x40503d01
+ PTP_CLOCK_GETCAPS2 = 0x40503d0a
+ PTP_ENABLE_PPS = 0x80043d04
+ PTP_ENABLE_PPS2 = 0x80043d0d
+ PTP_EXTTS_REQUEST = 0x80103d02
+ PTP_EXTTS_REQUEST2 = 0x80103d0b
+ PTP_MASK_CLEAR_ALL = 0x20003d13
+ PTP_MASK_EN_SINGLE = 0x80043d14
+ PTP_PEROUT_REQUEST = 0x80383d03
+ PTP_PEROUT_REQUEST2 = 0x80383d0c
+ PTP_PIN_SETFUNC = 0x80603d07
+ PTP_PIN_SETFUNC2 = 0x80603d10
+ PTP_SYS_OFFSET = 0x83403d05
+ PTP_SYS_OFFSET2 = 0x83403d0e
PTRACE_GETFPREGS = 0xe
PTRACE_GET_THREAD_AREA = 0x19
PTRACE_GET_THREAD_AREA_3264 = 0xc4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index ea00e8522..6d9cbc3b2 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -234,6 +234,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x20007434
PPPIOCXFERUNIT = 0x2000744e
PR_SET_PTRACER_ANY = 0xffffffffffffffff
+ PTP_CLOCK_GETCAPS = 0x40503d01
+ PTP_CLOCK_GETCAPS2 = 0x40503d0a
+ PTP_ENABLE_PPS = 0x80043d04
+ PTP_ENABLE_PPS2 = 0x80043d0d
+ PTP_EXTTS_REQUEST = 0x80103d02
+ PTP_EXTTS_REQUEST2 = 0x80103d0b
+ PTP_MASK_CLEAR_ALL = 0x20003d13
+ PTP_MASK_EN_SINGLE = 0x80043d14
+ PTP_PEROUT_REQUEST = 0x80383d03
+ PTP_PEROUT_REQUEST2 = 0x80383d0c
+ PTP_PIN_SETFUNC = 0x80603d07
+ PTP_PIN_SETFUNC2 = 0x80603d10
+ PTP_SYS_OFFSET = 0x83403d05
+ PTP_SYS_OFFSET2 = 0x83403d0e
PTRACE_GETFPREGS = 0xe
PTRACE_GET_THREAD_AREA = 0x19
PTRACE_GET_THREAD_AREA_3264 = 0xc4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 91c646871..5f9fedbce 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -234,6 +234,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x20007434
PPPIOCXFERUNIT = 0x2000744e
PR_SET_PTRACER_ANY = 0xffffffff
+ PTP_CLOCK_GETCAPS = 0x40503d01
+ PTP_CLOCK_GETCAPS2 = 0x40503d0a
+ PTP_ENABLE_PPS = 0x80043d04
+ PTP_ENABLE_PPS2 = 0x80043d0d
+ PTP_EXTTS_REQUEST = 0x80103d02
+ PTP_EXTTS_REQUEST2 = 0x80103d0b
+ PTP_MASK_CLEAR_ALL = 0x20003d13
+ PTP_MASK_EN_SINGLE = 0x80043d14
+ PTP_PEROUT_REQUEST = 0x80383d03
+ PTP_PEROUT_REQUEST2 = 0x80383d0c
+ PTP_PIN_SETFUNC = 0x80603d07
+ PTP_PIN_SETFUNC2 = 0x80603d10
+ PTP_SYS_OFFSET = 0x83403d05
+ PTP_SYS_OFFSET2 = 0x83403d0e
PTRACE_GETFPREGS = 0xe
PTRACE_GET_THREAD_AREA = 0x19
PTRACE_GET_THREAD_AREA_3264 = 0xc4
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 8cbf38d63..bb0026ee0 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -237,6 +237,20 @@ const (
PPPIOCXFERUNIT = 0x2000744e
PROT_SAO = 0x10
PR_SET_PTRACER_ANY = 0xffffffff
+ PTP_CLOCK_GETCAPS = 0x40503d01
+ PTP_CLOCK_GETCAPS2 = 0x40503d0a
+ PTP_ENABLE_PPS = 0x80043d04
+ PTP_ENABLE_PPS2 = 0x80043d0d
+ PTP_EXTTS_REQUEST = 0x80103d02
+ PTP_EXTTS_REQUEST2 = 0x80103d0b
+ PTP_MASK_CLEAR_ALL = 0x20003d13
+ PTP_MASK_EN_SINGLE = 0x80043d14
+ PTP_PEROUT_REQUEST = 0x80383d03
+ PTP_PEROUT_REQUEST2 = 0x80383d0c
+ PTP_PIN_SETFUNC = 0x80603d07
+ PTP_PIN_SETFUNC2 = 0x80603d10
+ PTP_SYS_OFFSET = 0x83403d05
+ PTP_SYS_OFFSET2 = 0x83403d0e
PTRACE_GETEVRREGS = 0x14
PTRACE_GETFPREGS = 0xe
PTRACE_GETREGS64 = 0x16
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index a2df73419..46120db5c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -237,6 +237,20 @@ const (
PPPIOCXFERUNIT = 0x2000744e
PROT_SAO = 0x10
PR_SET_PTRACER_ANY = 0xffffffffffffffff
+ PTP_CLOCK_GETCAPS = 0x40503d01
+ PTP_CLOCK_GETCAPS2 = 0x40503d0a
+ PTP_ENABLE_PPS = 0x80043d04
+ PTP_ENABLE_PPS2 = 0x80043d0d
+ PTP_EXTTS_REQUEST = 0x80103d02
+ PTP_EXTTS_REQUEST2 = 0x80103d0b
+ PTP_MASK_CLEAR_ALL = 0x20003d13
+ PTP_MASK_EN_SINGLE = 0x80043d14
+ PTP_PEROUT_REQUEST = 0x80383d03
+ PTP_PEROUT_REQUEST2 = 0x80383d0c
+ PTP_PIN_SETFUNC = 0x80603d07
+ PTP_PIN_SETFUNC2 = 0x80603d10
+ PTP_SYS_OFFSET = 0x83403d05
+ PTP_SYS_OFFSET2 = 0x83403d0e
PTRACE_GETEVRREGS = 0x14
PTRACE_GETFPREGS = 0xe
PTRACE_GETREGS64 = 0x16
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index 247913792..5c951634f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -237,6 +237,20 @@ const (
PPPIOCXFERUNIT = 0x2000744e
PROT_SAO = 0x10
PR_SET_PTRACER_ANY = 0xffffffffffffffff
+ PTP_CLOCK_GETCAPS = 0x40503d01
+ PTP_CLOCK_GETCAPS2 = 0x40503d0a
+ PTP_ENABLE_PPS = 0x80043d04
+ PTP_ENABLE_PPS2 = 0x80043d0d
+ PTP_EXTTS_REQUEST = 0x80103d02
+ PTP_EXTTS_REQUEST2 = 0x80103d0b
+ PTP_MASK_CLEAR_ALL = 0x20003d13
+ PTP_MASK_EN_SINGLE = 0x80043d14
+ PTP_PEROUT_REQUEST = 0x80383d03
+ PTP_PEROUT_REQUEST2 = 0x80383d0c
+ PTP_PIN_SETFUNC = 0x80603d07
+ PTP_PIN_SETFUNC2 = 0x80603d10
+ PTP_SYS_OFFSET = 0x83403d05
+ PTP_SYS_OFFSET2 = 0x83403d0e
PTRACE_GETEVRREGS = 0x14
PTRACE_GETFPREGS = 0xe
PTRACE_GETREGS64 = 0x16
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index d265f146e..11a84d5af 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -234,6 +234,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x7434
PPPIOCXFERUNIT = 0x744e
PR_SET_PTRACER_ANY = 0xffffffffffffffff
+ PTP_CLOCK_GETCAPS = 0x80503d01
+ PTP_CLOCK_GETCAPS2 = 0x80503d0a
+ PTP_ENABLE_PPS = 0x40043d04
+ PTP_ENABLE_PPS2 = 0x40043d0d
+ PTP_EXTTS_REQUEST = 0x40103d02
+ PTP_EXTTS_REQUEST2 = 0x40103d0b
+ PTP_MASK_CLEAR_ALL = 0x3d13
+ PTP_MASK_EN_SINGLE = 0x40043d14
+ PTP_PEROUT_REQUEST = 0x40383d03
+ PTP_PEROUT_REQUEST2 = 0x40383d0c
+ PTP_PIN_SETFUNC = 0x40603d07
+ PTP_PIN_SETFUNC2 = 0x40603d10
+ PTP_SYS_OFFSET = 0x43403d05
+ PTP_SYS_OFFSET2 = 0x43403d0e
PTRACE_GETFDPIC = 0x21
PTRACE_GETFDPIC_EXEC = 0x0
PTRACE_GETFDPIC_INTERP = 0x1
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 3f2d64439..f78c4617c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -234,6 +234,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x7434
PPPIOCXFERUNIT = 0x744e
PR_SET_PTRACER_ANY = 0xffffffffffffffff
+ PTP_CLOCK_GETCAPS = 0x80503d01
+ PTP_CLOCK_GETCAPS2 = 0x80503d0a
+ PTP_ENABLE_PPS = 0x40043d04
+ PTP_ENABLE_PPS2 = 0x40043d0d
+ PTP_EXTTS_REQUEST = 0x40103d02
+ PTP_EXTTS_REQUEST2 = 0x40103d0b
+ PTP_MASK_CLEAR_ALL = 0x3d13
+ PTP_MASK_EN_SINGLE = 0x40043d14
+ PTP_PEROUT_REQUEST = 0x40383d03
+ PTP_PEROUT_REQUEST2 = 0x40383d0c
+ PTP_PIN_SETFUNC = 0x40603d07
+ PTP_PIN_SETFUNC2 = 0x40603d10
+ PTP_SYS_OFFSET = 0x43403d05
+ PTP_SYS_OFFSET2 = 0x43403d0e
PTRACE_DISABLE_TE = 0x5010
PTRACE_ENABLE_TE = 0x5009
PTRACE_GET_LAST_BREAK = 0x5006
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index 5d8b727a1..aeb777c34 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -239,6 +239,20 @@ const (
PPPIOCUNBRIDGECHAN = 0x20007434
PPPIOCXFERUNIT = 0x2000744e
PR_SET_PTRACER_ANY = 0xffffffffffffffff
+ PTP_CLOCK_GETCAPS = 0x40503d01
+ PTP_CLOCK_GETCAPS2 = 0x40503d0a
+ PTP_ENABLE_PPS = 0x80043d04
+ PTP_ENABLE_PPS2 = 0x80043d0d
+ PTP_EXTTS_REQUEST = 0x80103d02
+ PTP_EXTTS_REQUEST2 = 0x80103d0b
+ PTP_MASK_CLEAR_ALL = 0x20003d13
+ PTP_MASK_EN_SINGLE = 0x80043d14
+ PTP_PEROUT_REQUEST = 0x80383d03
+ PTP_PEROUT_REQUEST2 = 0x80383d0c
+ PTP_PIN_SETFUNC = 0x80603d07
+ PTP_PIN_SETFUNC2 = 0x80603d10
+ PTP_SYS_OFFSET = 0x83403d05
+ PTP_SYS_OFFSET2 = 0x83403d0e
PTRACE_GETFPAREGS = 0x14
PTRACE_GETFPREGS = 0xe
PTRACE_GETFPREGS64 = 0x19
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index af30da557..5cc1e8eb2 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func ClockSettime(clockid int32, time *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
_, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 3a69e4549..8daaf3faf 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -1752,12 +1752,6 @@ const (
IFLA_IPVLAN_UNSPEC = 0x0
IFLA_IPVLAN_MODE = 0x1
IFLA_IPVLAN_FLAGS = 0x2
- NETKIT_NEXT = -0x1
- NETKIT_PASS = 0x0
- NETKIT_DROP = 0x2
- NETKIT_REDIRECT = 0x7
- NETKIT_L2 = 0x0
- NETKIT_L3 = 0x1
IFLA_NETKIT_UNSPEC = 0x0
IFLA_NETKIT_PEER_INFO = 0x1
IFLA_NETKIT_PRIMARY = 0x2
@@ -1796,6 +1790,7 @@ const (
IFLA_VXLAN_DF = 0x1d
IFLA_VXLAN_VNIFILTER = 0x1e
IFLA_VXLAN_LOCALBYPASS = 0x1f
+ IFLA_VXLAN_LABEL_POLICY = 0x20
IFLA_GENEVE_UNSPEC = 0x0
IFLA_GENEVE_ID = 0x1
IFLA_GENEVE_REMOTE = 0x2
@@ -1825,6 +1820,8 @@ const (
IFLA_GTP_ROLE = 0x4
IFLA_GTP_CREATE_SOCKETS = 0x5
IFLA_GTP_RESTART_COUNT = 0x6
+ IFLA_GTP_LOCAL = 0x7
+ IFLA_GTP_LOCAL6 = 0x8
IFLA_BOND_UNSPEC = 0x0
IFLA_BOND_MODE = 0x1
IFLA_BOND_ACTIVE_SLAVE = 0x2
@@ -1857,6 +1854,7 @@ const (
IFLA_BOND_AD_LACP_ACTIVE = 0x1d
IFLA_BOND_MISSED_MAX = 0x1e
IFLA_BOND_NS_IP6_TARGET = 0x1f
+ IFLA_BOND_COUPLED_CONTROL = 0x20
IFLA_BOND_AD_INFO_UNSPEC = 0x0
IFLA_BOND_AD_INFO_AGGREGATOR = 0x1
IFLA_BOND_AD_INFO_NUM_PORTS = 0x2
@@ -1925,6 +1923,7 @@ const (
IFLA_HSR_SEQ_NR = 0x5
IFLA_HSR_VERSION = 0x6
IFLA_HSR_PROTOCOL = 0x7
+ IFLA_HSR_INTERLINK = 0x8
IFLA_STATS_UNSPEC = 0x0
IFLA_STATS_LINK_64 = 0x1
IFLA_STATS_LINK_XSTATS = 0x2
@@ -1977,6 +1976,15 @@ const (
IFLA_DSA_MASTER = 0x1
)
+const (
+ NETKIT_NEXT = -0x1
+ NETKIT_PASS = 0x0
+ NETKIT_DROP = 0x2
+ NETKIT_REDIRECT = 0x7
+ NETKIT_L2 = 0x0
+ NETKIT_L3 = 0x1
+)
+
const (
NF_INET_PRE_ROUTING = 0x0
NF_INET_LOCAL_IN = 0x1
@@ -4110,6 +4118,106 @@ type EthtoolDrvinfo struct {
Regdump_len uint32
}
+type EthtoolTsInfo struct {
+ Cmd uint32
+ So_timestamping uint32
+ Phc_index int32
+ Tx_types uint32
+ Tx_reserved [3]uint32
+ Rx_filters uint32
+ Rx_reserved [3]uint32
+}
+
+type HwTstampConfig struct {
+ Flags int32
+ Tx_type int32
+ Rx_filter int32
+}
+
+const (
+ HWTSTAMP_FILTER_NONE = 0x0
+ HWTSTAMP_FILTER_ALL = 0x1
+ HWTSTAMP_FILTER_SOME = 0x2
+ HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3
+ HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6
+ HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9
+ HWTSTAMP_FILTER_PTP_V2_EVENT = 0xc
+)
+
+const (
+ HWTSTAMP_TX_OFF = 0x0
+ HWTSTAMP_TX_ON = 0x1
+ HWTSTAMP_TX_ONESTEP_SYNC = 0x2
+)
+
+type (
+ PtpClockCaps struct {
+ Max_adj int32
+ N_alarm int32
+ N_ext_ts int32
+ N_per_out int32
+ Pps int32
+ N_pins int32
+ Cross_timestamping int32
+ Adjust_phase int32
+ Max_phase_adj int32
+ Rsv [11]int32
+ }
+ PtpClockTime struct {
+ Sec int64
+ Nsec uint32
+ Reserved uint32
+ }
+ PtpExttsEvent struct {
+ T PtpClockTime
+ Index uint32
+ Flags uint32
+ Rsv [2]uint32
+ }
+ PtpExttsRequest struct {
+ Index uint32
+ Flags uint32
+ Rsv [2]uint32
+ }
+ PtpPeroutRequest struct {
+ StartOrPhase PtpClockTime
+ Period PtpClockTime
+ Index uint32
+ Flags uint32
+ On PtpClockTime
+ }
+ PtpPinDesc struct {
+ Name [64]byte
+ Index uint32
+ Func uint32
+ Chan uint32
+ Rsv [5]uint32
+ }
+ PtpSysOffset struct {
+ Samples uint32
+ Rsv [3]uint32
+ Ts [51]PtpClockTime
+ }
+ PtpSysOffsetExtended struct {
+ Samples uint32
+ Rsv [3]uint32
+ Ts [25][3]PtpClockTime
+ }
+ PtpSysOffsetPrecise struct {
+ Device PtpClockTime
+ Realtime PtpClockTime
+ Monoraw PtpClockTime
+ Rsv [4]uint32
+ }
+)
+
+const (
+ PTP_PF_NONE = 0x0
+ PTP_PF_EXTTS = 0x1
+ PTP_PF_PEROUT = 0x2
+ PTP_PF_PHYSYNC = 0x3
+)
+
type (
HIDRawReportDescriptor struct {
Size uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
index d9a13af46..2e5d5a443 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
@@ -377,6 +377,12 @@ type Flock_t struct {
Pid int32
}
+type F_cnvrt struct {
+ Cvtcmd int32
+ Pccsid int16
+ Fccsid int16
+}
+
type Termios struct {
Cflag uint32
Iflag uint32
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 5cee9a314..4510bfc3f 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -725,20 +725,12 @@ func DurationSinceBoot() time.Duration {
}
func Ftruncate(fd Handle, length int64) (err error) {
- curoffset, e := Seek(fd, 0, 1)
- if e != nil {
- return e
- }
- defer Seek(fd, curoffset, 0)
- _, e = Seek(fd, length, 0)
- if e != nil {
- return e
+ type _FILE_END_OF_FILE_INFO struct {
+ EndOfFile int64
}
- e = SetEndOfFile(fd)
- if e != nil {
- return e
- }
- return nil
+ var info _FILE_END_OF_FILE_INFO
+ info.EndOfFile = length
+ return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info)))
}
func Gettimeofday(tv *Timeval) (err error) {
@@ -894,6 +886,11 @@ const socket_error = uintptr(^uint32(0))
//sys GetACP() (acp uint32) = kernel32.GetACP
//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar
//sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx
+//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex
+//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry
+//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange
+//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange
+//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2
// For testing: clients can set this flag to force
// creation of IPv6 sockets to return EAFNOSUPPORT.
@@ -1685,13 +1682,16 @@ func (s NTStatus) Error() string {
// do not use NTUnicodeString, and instead UTF16PtrFromString should be used for
// the more common *uint16 string type.
func NewNTUnicodeString(s string) (*NTUnicodeString, error) {
- var u NTUnicodeString
- s16, err := UTF16PtrFromString(s)
+ s16, err := UTF16FromString(s)
if err != nil {
return nil, err
}
- RtlInitUnicodeString(&u, s16)
- return &u, nil
+ n := uint16(len(s16) * 2)
+ return &NTUnicodeString{
+ Length: n - 2, // subtract 2 bytes for the NULL terminator
+ MaximumLength: n,
+ Buffer: &s16[0],
+ }, nil
}
// Slice returns a uint16 slice that aliases the data in the NTUnicodeString.
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 7b97a154c..51311e205 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -2203,6 +2203,132 @@ const (
IfOperStatusLowerLayerDown = 7
)
+const (
+ IF_MAX_PHYS_ADDRESS_LENGTH = 32
+ IF_MAX_STRING_SIZE = 256
+)
+
+// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex.
+const (
+ MibIfEntryNormal = 0
+ MibIfEntryNormalWithoutStatistics = 2
+)
+
+// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type.
+const (
+ MibParameterNotification = 0
+ MibAddInstance = 1
+ MibDeleteInstance = 2
+ MibInitialNotification = 3
+)
+
+// MibIfRow2 stores information about a particular interface. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2.
+type MibIfRow2 struct {
+ InterfaceLuid uint64
+ InterfaceIndex uint32
+ InterfaceGuid GUID
+ Alias [IF_MAX_STRING_SIZE + 1]uint16
+ Description [IF_MAX_STRING_SIZE + 1]uint16
+ PhysicalAddressLength uint32
+ PhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8
+ PermanentPhysicalAddress [IF_MAX_PHYS_ADDRESS_LENGTH]uint8
+ Mtu uint32
+ Type uint32
+ TunnelType uint32
+ MediaType uint32
+ PhysicalMediumType uint32
+ AccessType uint32
+ DirectionType uint32
+ InterfaceAndOperStatusFlags uint8
+ OperStatus uint32
+ AdminStatus uint32
+ MediaConnectState uint32
+ NetworkGuid GUID
+ ConnectionType uint32
+ TransmitLinkSpeed uint64
+ ReceiveLinkSpeed uint64
+ InOctets uint64
+ InUcastPkts uint64
+ InNUcastPkts uint64
+ InDiscards uint64
+ InErrors uint64
+ InUnknownProtos uint64
+ InUcastOctets uint64
+ InMulticastOctets uint64
+ InBroadcastOctets uint64
+ OutOctets uint64
+ OutUcastPkts uint64
+ OutNUcastPkts uint64
+ OutDiscards uint64
+ OutErrors uint64
+ OutUcastOctets uint64
+ OutMulticastOctets uint64
+ OutBroadcastOctets uint64
+ OutQLen uint64
+}
+
+// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row.
+type MibUnicastIpAddressRow struct {
+ Address RawSockaddrInet6 // SOCKADDR_INET union
+ InterfaceLuid uint64
+ InterfaceIndex uint32
+ PrefixOrigin uint32
+ SuffixOrigin uint32
+ ValidLifetime uint32
+ PreferredLifetime uint32
+ OnLinkPrefixLength uint8
+ SkipAsSource uint8
+ DadState uint32
+ ScopeId uint32
+ CreationTimeStamp Filetime
+}
+
+const ScopeLevelCount = 16
+
+// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface.
+// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row.
+type MibIpInterfaceRow struct {
+ Family uint16
+ InterfaceLuid uint64
+ InterfaceIndex uint32
+ MaxReassemblySize uint32
+ InterfaceIdentifier uint64
+ MinRouterAdvertisementInterval uint32
+ MaxRouterAdvertisementInterval uint32
+ AdvertisingEnabled uint8
+ ForwardingEnabled uint8
+ WeakHostSend uint8
+ WeakHostReceive uint8
+ UseAutomaticMetric uint8
+ UseNeighborUnreachabilityDetection uint8
+ ManagedAddressConfigurationSupported uint8
+ OtherStatefulConfigurationSupported uint8
+ AdvertiseDefaultRoute uint8
+ RouterDiscoveryBehavior uint32
+ DadTransmits uint32
+ BaseReachableTime uint32
+ RetransmitTime uint32
+ PathMtuDiscoveryTimeout uint32
+ LinkLocalAddressBehavior uint32
+ LinkLocalAddressTimeout uint32
+ ZoneIndices [ScopeLevelCount]uint32
+ SitePrefixLength uint32
+ Metric uint32
+ NlMtu uint32
+ Connected uint8
+ SupportsWakeUpPatterns uint8
+ SupportsNeighborDiscovery uint8
+ SupportsRouterDiscovery uint8
+ ReachableTime uint32
+ TransmitOffload uint32
+ ReceiveOffload uint32
+ DisableDefaultRoutes uint8
+}
+
// Console related constants used for the mode parameter to SetConsoleMode. See
// https://docs.microsoft.com/en-us/windows/console/setconsolemode for details.
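MibIfRow2 pairs with the GetIfEntry2Ex binding added in this change set. A hedged sketch of looking up a single interface by index; the index value 1 is a placeholder, and real code would obtain it from an adapter or route lookup:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	// Interface index 1 is a placeholder value for this sketch.
	row := windows.MibIfRow2{InterfaceIndex: 1}
	if err := windows.GetIfEntry2Ex(windows.MibIfEntryNormalWithoutStatistics, &row); err != nil {
		log.Fatal(err)
	}
	fmt.Println("alias:", windows.UTF16ToString(row.Alias[:]))
	fmt.Println("mtu:  ", row.Mtu)
}
```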
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 4c2e1bdc0..6f5252880 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -181,10 +181,15 @@ var (
procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree")
procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute")
procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute")
+ procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2")
procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses")
procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo")
procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx")
procGetIfEntry = modiphlpapi.NewProc("GetIfEntry")
+ procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex")
+ procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry")
+ procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange")
+ procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange")
procAddDllDirectory = modkernel32.NewProc("AddDllDirectory")
procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject")
procCancelIo = modkernel32.NewProc("CancelIo")
@@ -1606,6 +1611,14 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si
return
}
+func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) {
+ r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0)
if r0 != 0 {
@@ -1638,6 +1651,46 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
return
}
+func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
+ r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row)))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
+func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) {
+ r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row)))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
+func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
+ var _p0 uint32
+ if initialNotification {
+ _p0 = 1
+ }
+ r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
+func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
+ var _p0 uint32
+ if initialNotification {
+ _p0 = 1
+ }
+ r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
cookie = uintptr(r0)
diff --git a/vendor/golang.org/x/term/README.md b/vendor/golang.org/x/term/README.md
index d03d0aefe..05ff623f9 100644
--- a/vendor/golang.org/x/term/README.md
+++ b/vendor/golang.org/x/term/README.md
@@ -4,16 +4,13 @@
This repository provides Go terminal and console support packages.
-## Download/Install
-
-The easiest way to install is to run `go get -u golang.org/x/term`. You can
-also manually git clone the repository to `$GOPATH/src/golang.org/x/term`.
-
## Report Issues / Send Patches
This repository uses Gerrit for code changes. To learn how to submit changes to
-this repository, see https://golang.org/doc/contribute.html.
+this repository, see https://go.dev/doc/contribute.
+
+The git repository is https://go.googlesource.com/term.
The main issue tracker for the term repository is located at
-https://github.com/golang/go/issues. Prefix your issue with "x/term:" in the
+https://go.dev/issues. Prefix your issue with "x/term:" in the
subject line, so it is easy to find.
diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE
index 6a66aea5e..2a7cf70da 100644
--- a/vendor/golang.org/x/time/LICENSE
+++ b/vendor/golang.org/x/time/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index f0e0cf3cb..93a798ab6 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -52,6 +52,8 @@ func Every(interval time.Duration) Limit {
// or its associated context.Context is canceled.
//
// The methods AllowN, ReserveN, and WaitN consume n tokens.
+//
+// Limiter is safe for simultaneous use by multiple goroutines.
type Limiter struct {
mu sync.Mutex
limit Limit
@@ -97,8 +99,9 @@ func (lim *Limiter) Tokens() float64 {
// bursts of at most b tokens.
func NewLimiter(r Limit, b int) *Limiter {
return &Limiter{
- limit: r,
- burst: b,
+ limit: r,
+ burst: b,
+ tokens: float64(b),
}
}
@@ -342,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
tokens: n,
timeToAct: t,
}
- } else if lim.limit == 0 {
- var ok bool
- if lim.burst >= n {
- ok = true
- lim.burst -= n
- }
- return Reservation{
- ok: ok,
- lim: lim,
- tokens: lim.burst,
- timeToAct: t,
- }
}
t, tokens := lim.advance(t)
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
deleted file mode 100644
index 721053c20..000000000
--- a/vendor/google.golang.org/appengine/internal/api.go
+++ /dev/null
@@ -1,678 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package internal
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io/ioutil"
- "log"
- "net"
- "net/http"
- "net/url"
- "os"
- "runtime"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
-
- basepb "google.golang.org/appengine/internal/base"
- logpb "google.golang.org/appengine/internal/log"
- remotepb "google.golang.org/appengine/internal/remote_api"
-)
-
-const (
- apiPath = "/rpc_http"
- defaultTicketSuffix = "/default.20150612t184001.0"
-)
-
-var (
- // Incoming headers.
- ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
- dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
- traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
- curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
- userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
- remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
- devRequestIdHeader = http.CanonicalHeaderKey("X-Appengine-Dev-Request-Id")
-
- // Outgoing headers.
- apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
- apiEndpointHeaderValue = []string{"app-engine-apis"}
- apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
- apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
- apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
- apiContentType = http.CanonicalHeaderKey("Content-Type")
- apiContentTypeValue = []string{"application/octet-stream"}
- logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
-
- apiHTTPClient = &http.Client{
- Transport: &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: limitDial,
- MaxIdleConns: 1000,
- MaxIdleConnsPerHost: 10000,
- IdleConnTimeout: 90 * time.Second,
- },
- }
-
- defaultTicketOnce sync.Once
- defaultTicket string
- backgroundContextOnce sync.Once
- backgroundContext netcontext.Context
-)
-
-func apiURL() *url.URL {
- host, port := "appengine.googleapis.internal", "10001"
- if h := os.Getenv("API_HOST"); h != "" {
- host = h
- }
- if p := os.Getenv("API_PORT"); p != "" {
- port = p
- }
- return &url.URL{
- Scheme: "http",
- Host: host + ":" + port,
- Path: apiPath,
- }
-}
-
-func handleHTTP(w http.ResponseWriter, r *http.Request) {
- c := &context{
- req: r,
- outHeader: w.Header(),
- apiURL: apiURL(),
- }
- r = r.WithContext(withContext(r.Context(), c))
- c.req = r
-
- stopFlushing := make(chan int)
-
- // Patch up RemoteAddr so it looks reasonable.
- if addr := r.Header.Get(userIPHeader); addr != "" {
- r.RemoteAddr = addr
- } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
- r.RemoteAddr = addr
- } else {
- // Should not normally reach here, but pick a sensible default anyway.
- r.RemoteAddr = "127.0.0.1"
- }
- // The address in the headers will most likely be of these forms:
- // 123.123.123.123
- // 2001:db8::1
- // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
- if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
- // Assume the remote address is only a host; add a default port.
- r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
- }
-
- // Start goroutine responsible for flushing app logs.
- // This is done after adding c to ctx.m (and stopped before removing it)
- // because flushing logs requires making an API call.
- go c.logFlusher(stopFlushing)
-
- executeRequestSafely(c, r)
- c.outHeader = nil // make sure header changes aren't respected any more
-
- stopFlushing <- 1 // any logging beyond this point will be dropped
-
- // Flush any pending logs asynchronously.
- c.pendingLogs.Lock()
- flushes := c.pendingLogs.flushes
- if len(c.pendingLogs.lines) > 0 {
- flushes++
- }
- c.pendingLogs.Unlock()
- flushed := make(chan struct{})
- go func() {
- defer close(flushed)
- // Force a log flush, because with very short requests we
- // may not ever flush logs.
- c.flushLog(true)
- }()
- w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
-
- // Avoid nil Write call if c.Write is never called.
- if c.outCode != 0 {
- w.WriteHeader(c.outCode)
- }
- if c.outBody != nil {
- w.Write(c.outBody)
- }
- // Wait for the last flush to complete before returning,
- // otherwise the security ticket will not be valid.
- <-flushed
-}
-
-func executeRequestSafely(c *context, r *http.Request) {
- defer func() {
- if x := recover(); x != nil {
- logf(c, 4, "%s", renderPanic(x)) // 4 == critical
- c.outCode = 500
- }
- }()
-
- http.DefaultServeMux.ServeHTTP(c, r)
-}
-
-func renderPanic(x interface{}) string {
- buf := make([]byte, 16<<10) // 16 KB should be plenty
- buf = buf[:runtime.Stack(buf, false)]
-
- // Remove the first few stack frames:
- // this func
- // the recover closure in the caller
- // That will root the stack trace at the site of the panic.
- const (
- skipStart = "internal.renderPanic"
- skipFrames = 2
- )
- start := bytes.Index(buf, []byte(skipStart))
- p := start
- for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
- p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
- if p < 0 {
- break
- }
- }
- if p >= 0 {
- // buf[start:p+1] is the block to remove.
- // Copy buf[p+1:] over buf[start:] and shrink buf.
- copy(buf[start:], buf[p+1:])
- buf = buf[:len(buf)-(p+1-start)]
- }
-
- // Add panic heading.
- head := fmt.Sprintf("panic: %v\n\n", x)
- if len(head) > len(buf) {
- // Extremely unlikely to happen.
- return head
- }
- copy(buf[len(head):], buf)
- copy(buf, head)
-
- return string(buf)
-}
-
-// context represents the context of an in-flight HTTP request.
-// It implements the appengine.Context and http.ResponseWriter interfaces.
-type context struct {
- req *http.Request
-
- outCode int
- outHeader http.Header
- outBody []byte
-
- pendingLogs struct {
- sync.Mutex
- lines []*logpb.UserAppLogLine
- flushes int
- }
-
- apiURL *url.URL
-}
-
-var contextKey = "holds a *context"
-
-// jointContext joins two contexts in a superficial way.
-// It takes values and timeouts from a base context, and only values from another context.
-type jointContext struct {
- base netcontext.Context
- valuesOnly netcontext.Context
-}
-
-func (c jointContext) Deadline() (time.Time, bool) {
- return c.base.Deadline()
-}
-
-func (c jointContext) Done() <-chan struct{} {
- return c.base.Done()
-}
-
-func (c jointContext) Err() error {
- return c.base.Err()
-}
-
-func (c jointContext) Value(key interface{}) interface{} {
- if val := c.base.Value(key); val != nil {
- return val
- }
- return c.valuesOnly.Value(key)
-}
-
-// fromContext returns the App Engine context or nil if ctx is not
-// derived from an App Engine context.
-func fromContext(ctx netcontext.Context) *context {
- c, _ := ctx.Value(&contextKey).(*context)
- return c
-}
-
-func withContext(parent netcontext.Context, c *context) netcontext.Context {
- ctx := netcontext.WithValue(parent, &contextKey, c)
- if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
- ctx = withNamespace(ctx, ns)
- }
- return ctx
-}
-
-func toContext(c *context) netcontext.Context {
- return withContext(netcontext.Background(), c)
-}
-
-func IncomingHeaders(ctx netcontext.Context) http.Header {
- if c := fromContext(ctx); c != nil {
- return c.req.Header
- }
- return nil
-}
-
-func ReqContext(req *http.Request) netcontext.Context {
- return req.Context()
-}
-
-func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
- return jointContext{
- base: parent,
- valuesOnly: req.Context(),
- }
-}
-
-// DefaultTicket returns a ticket used for background context or dev_appserver.
-func DefaultTicket() string {
- defaultTicketOnce.Do(func() {
- if IsDevAppServer() {
- defaultTicket = "testapp" + defaultTicketSuffix
- return
- }
- appID := partitionlessAppID()
- escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
- majVersion := VersionID(nil)
- if i := strings.Index(majVersion, "."); i > 0 {
- majVersion = majVersion[:i]
- }
- defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
- })
- return defaultTicket
-}
-
-func BackgroundContext() netcontext.Context {
- backgroundContextOnce.Do(func() {
- // Compute background security ticket.
- ticket := DefaultTicket()
-
- c := &context{
- req: &http.Request{
- Header: http.Header{
- ticketHeader: []string{ticket},
- },
- },
- apiURL: apiURL(),
- }
- backgroundContext = toContext(c)
-
- // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
- go c.logFlusher(make(chan int))
- })
-
- return backgroundContext
-}
-
-// RegisterTestRequest registers the HTTP request req for testing, such that
-// any API calls are sent to the provided URL. It returns a closure to delete
-// the registration.
-// It should only be used by aetest package.
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) {
- c := &context{
- req: req,
- apiURL: apiURL,
- }
- ctx := withContext(decorate(req.Context()), c)
- req = req.WithContext(ctx)
- c.req = req
- return req, func() {}
-}
-
-var errTimeout = &CallError{
- Detail: "Deadline exceeded",
- Code: int32(remotepb.RpcError_CANCELLED),
- Timeout: true,
-}
-
-func (c *context) Header() http.Header { return c.outHeader }
-
-// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
-// codes do not permit a response body (nor response entity headers such as
-// Content-Length, Content-Type, etc).
-func bodyAllowedForStatus(status int) bool {
- switch {
- case status >= 100 && status <= 199:
- return false
- case status == 204:
- return false
- case status == 304:
- return false
- }
- return true
-}
-
-func (c *context) Write(b []byte) (int, error) {
- if c.outCode == 0 {
- c.WriteHeader(http.StatusOK)
- }
- if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
- return 0, http.ErrBodyNotAllowed
- }
- c.outBody = append(c.outBody, b...)
- return len(b), nil
-}
-
-func (c *context) WriteHeader(code int) {
- if c.outCode != 0 {
- logf(c, 3, "WriteHeader called multiple times on request.") // error level
- return
- }
- c.outCode = code
-}
-
-func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
- hreq := &http.Request{
- Method: "POST",
- URL: c.apiURL,
- Header: http.Header{
- apiEndpointHeader: apiEndpointHeaderValue,
- apiMethodHeader: apiMethodHeaderValue,
- apiContentType: apiContentTypeValue,
- apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
- },
- Body: ioutil.NopCloser(bytes.NewReader(body)),
- ContentLength: int64(len(body)),
- Host: c.apiURL.Host,
- }
- if info := c.req.Header.Get(dapperHeader); info != "" {
- hreq.Header.Set(dapperHeader, info)
- }
- if info := c.req.Header.Get(traceHeader); info != "" {
- hreq.Header.Set(traceHeader, info)
- }
-
- tr := apiHTTPClient.Transport.(*http.Transport)
-
- var timedOut int32 // atomic; set to 1 if timed out
- t := time.AfterFunc(timeout, func() {
- atomic.StoreInt32(&timedOut, 1)
- tr.CancelRequest(hreq)
- })
- defer t.Stop()
- defer func() {
- // Check if timeout was exceeded.
- if atomic.LoadInt32(&timedOut) != 0 {
- err = errTimeout
- }
- }()
-
- hresp, err := apiHTTPClient.Do(hreq)
- if err != nil {
- return nil, &CallError{
- Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- defer hresp.Body.Close()
- hrespBody, err := ioutil.ReadAll(hresp.Body)
- if hresp.StatusCode != 200 {
- return nil, &CallError{
- Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- if err != nil {
- return nil, &CallError{
- Detail: fmt.Sprintf("service bridge response bad: %v", err),
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- return hrespBody, nil
-}
-
-func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
- if ns := NamespaceFromContext(ctx); ns != "" {
- if fn, ok := NamespaceMods[service]; ok {
- fn(in, ns)
- }
- }
-
- if f, ctx, ok := callOverrideFromContext(ctx); ok {
- return f(ctx, service, method, in, out)
- }
-
- // Handle already-done contexts quickly.
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- c := fromContext(ctx)
- if c == nil {
- // Give a good error message rather than a panic lower down.
- return errNotAppEngineContext
- }
-
- // Apply transaction modifications if we're in a transaction.
- if t := transactionFromContext(ctx); t != nil {
- if t.finished {
- return errors.New("transaction context has expired")
- }
- applyTransaction(in, &t.transaction)
- }
-
- // Default RPC timeout is 60s.
- timeout := 60 * time.Second
- if deadline, ok := ctx.Deadline(); ok {
- timeout = deadline.Sub(time.Now())
- }
-
- data, err := proto.Marshal(in)
- if err != nil {
- return err
- }
-
- ticket := c.req.Header.Get(ticketHeader)
- // Use a test ticket under test environment.
- if ticket == "" {
- if appid := ctx.Value(&appIDOverrideKey); appid != nil {
- ticket = appid.(string) + defaultTicketSuffix
- }
- }
- // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver.
- if ticket == "" {
- ticket = DefaultTicket()
- }
- if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" {
- ticket = dri
- }
- req := &remotepb.Request{
- ServiceName: &service,
- Method: &method,
- Request: data,
- RequestId: &ticket,
- }
- hreqBody, err := proto.Marshal(req)
- if err != nil {
- return err
- }
-
- hrespBody, err := c.post(hreqBody, timeout)
- if err != nil {
- return err
- }
-
- res := &remotepb.Response{}
- if err := proto.Unmarshal(hrespBody, res); err != nil {
- return err
- }
- if res.RpcError != nil {
- ce := &CallError{
- Detail: res.RpcError.GetDetail(),
- Code: *res.RpcError.Code,
- }
- switch remotepb.RpcError_ErrorCode(ce.Code) {
- case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
- ce.Timeout = true
- }
- return ce
- }
- if res.ApplicationError != nil {
- return &APIError{
- Service: *req.ServiceName,
- Detail: res.ApplicationError.GetDetail(),
- Code: *res.ApplicationError.Code,
- }
- }
- if res.Exception != nil || res.JavaException != nil {
- // This shouldn't happen, but let's be defensive.
- return &CallError{
- Detail: "service bridge returned exception",
- Code: int32(remotepb.RpcError_UNKNOWN),
- }
- }
- return proto.Unmarshal(res.Response, out)
-}
-
-func (c *context) Request() *http.Request {
- return c.req
-}
-
-func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
- // Truncate long log lines.
- // TODO(dsymonds): Check if this is still necessary.
- const lim = 8 << 10
- if len(*ll.Message) > lim {
- suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
- ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
- }
-
- c.pendingLogs.Lock()
- c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
- c.pendingLogs.Unlock()
-}
-
-var logLevelName = map[int64]string{
- 0: "DEBUG",
- 1: "INFO",
- 2: "WARNING",
- 3: "ERROR",
- 4: "CRITICAL",
-}
-
-func logf(c *context, level int64, format string, args ...interface{}) {
- if c == nil {
- panic("not an App Engine context")
- }
- s := fmt.Sprintf(format, args...)
- s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
- c.addLogLine(&logpb.UserAppLogLine{
- TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
- Level: &level,
- Message: &s,
- })
- // Only duplicate log to stderr if not running on App Engine second generation
- if !IsSecondGen() {
- log.Print(logLevelName[level] + ": " + s)
- }
-}
-
-// flushLog attempts to flush any pending logs to the appserver.
-// It should not be called concurrently.
-func (c *context) flushLog(force bool) (flushed bool) {
- c.pendingLogs.Lock()
- // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
- n, rem := 0, 30<<20
- for ; n < len(c.pendingLogs.lines); n++ {
- ll := c.pendingLogs.lines[n]
- // Each log line will require about 3 bytes of overhead.
- nb := proto.Size(ll) + 3
- if nb > rem {
- break
- }
- rem -= nb
- }
- lines := c.pendingLogs.lines[:n]
- c.pendingLogs.lines = c.pendingLogs.lines[n:]
- c.pendingLogs.Unlock()
-
- if len(lines) == 0 && !force {
- // Nothing to flush.
- return false
- }
-
- rescueLogs := false
- defer func() {
- if rescueLogs {
- c.pendingLogs.Lock()
- c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
- c.pendingLogs.Unlock()
- }
- }()
-
- buf, err := proto.Marshal(&logpb.UserAppLogGroup{
- LogLine: lines,
- })
- if err != nil {
- log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
- rescueLogs = true
- return false
- }
-
- req := &logpb.FlushRequest{
- Logs: buf,
- }
- res := &basepb.VoidProto{}
- c.pendingLogs.Lock()
- c.pendingLogs.flushes++
- c.pendingLogs.Unlock()
- if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
- log.Printf("internal.flushLog: Flush RPC: %v", err)
- rescueLogs = true
- return false
- }
- return true
-}
-
-const (
- // Log flushing parameters.
- flushInterval = 1 * time.Second
- forceFlushInterval = 60 * time.Second
-)
-
-func (c *context) logFlusher(stop <-chan int) {
- lastFlush := time.Now()
- tick := time.NewTicker(flushInterval)
- for {
- select {
- case <-stop:
- // Request finished.
- tick.Stop()
- return
- case <-tick.C:
- force := time.Now().Sub(lastFlush) > forceFlushInterval
- if c.flushLog(force) {
- lastFlush = time.Now()
- }
- }
- }
-}
-
-func ContextForTesting(req *http.Request) netcontext.Context {
- return toContext(&context{req: req})
-}
diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
deleted file mode 100644
index f0f40b2e3..000000000
--- a/vendor/google.golang.org/appengine/internal/api_classic.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package internal
-
-import (
- "errors"
- "fmt"
- "net/http"
- "time"
-
- "appengine"
- "appengine_internal"
- basepb "appengine_internal/base"
-
- "github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
-)
-
-var contextKey = "holds an appengine.Context"
-
-// fromContext returns the App Engine context or nil if ctx is not
-// derived from an App Engine context.
-func fromContext(ctx netcontext.Context) appengine.Context {
- c, _ := ctx.Value(&contextKey).(appengine.Context)
- return c
-}
-
-// This is only for classic App Engine adapters.
-func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) {
- c := fromContext(ctx)
- if c == nil {
- return nil, errNotAppEngineContext
- }
- return c, nil
-}
-
-func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
- ctx := netcontext.WithValue(parent, &contextKey, c)
-
- s := &basepb.StringProto{}
- c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
- if ns := s.GetValue(); ns != "" {
- ctx = NamespacedContext(ctx, ns)
- }
-
- return ctx
-}
-
-func IncomingHeaders(ctx netcontext.Context) http.Header {
- if c := fromContext(ctx); c != nil {
- if req, ok := c.Request().(*http.Request); ok {
- return req.Header
- }
- }
- return nil
-}
-
-func ReqContext(req *http.Request) netcontext.Context {
- return WithContext(netcontext.Background(), req)
-}
-
-func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
- c := appengine.NewContext(req)
- return withContext(parent, c)
-}
-
-type testingContext struct {
- appengine.Context
-
- req *http.Request
-}
-
-func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
-func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
- if service == "__go__" && method == "GetNamespace" {
- return nil
- }
- return fmt.Errorf("testingContext: unsupported Call")
-}
-func (t *testingContext) Request() interface{} { return t.req }
-
-func ContextForTesting(req *http.Request) netcontext.Context {
- return withContext(netcontext.Background(), &testingContext{req: req})
-}
-
-func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
- if ns := NamespaceFromContext(ctx); ns != "" {
- if fn, ok := NamespaceMods[service]; ok {
- fn(in, ns)
- }
- }
-
- if f, ctx, ok := callOverrideFromContext(ctx); ok {
- return f(ctx, service, method, in, out)
- }
-
- // Handle already-done contexts quickly.
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- c := fromContext(ctx)
- if c == nil {
- // Give a good error message rather than a panic lower down.
- return errNotAppEngineContext
- }
-
- // Apply transaction modifications if we're in a transaction.
- if t := transactionFromContext(ctx); t != nil {
- if t.finished {
- return errors.New("transaction context has expired")
- }
- applyTransaction(in, &t.transaction)
- }
-
- var opts *appengine_internal.CallOptions
- if d, ok := ctx.Deadline(); ok {
- opts = &appengine_internal.CallOptions{
- Timeout: d.Sub(time.Now()),
- }
- }
-
- err := c.Call(service, method, in, out, opts)
- switch v := err.(type) {
- case *appengine_internal.APIError:
- return &APIError{
- Service: v.Service,
- Detail: v.Detail,
- Code: v.Code,
- }
- case *appengine_internal.CallError:
- return &CallError{
- Detail: v.Detail,
- Code: v.Code,
- Timeout: v.Timeout,
- }
- }
- return err
-}
-
-func handleHTTP(w http.ResponseWriter, r *http.Request) {
- panic("handleHTTP called; this should be impossible")
-}
-
-func logf(c appengine.Context, level int64, format string, args ...interface{}) {
- var fn func(format string, args ...interface{})
- switch level {
- case 0:
- fn = c.Debugf
- case 1:
- fn = c.Infof
- case 2:
- fn = c.Warningf
- case 3:
- fn = c.Errorf
- case 4:
- fn = c.Criticalf
- default:
- // This shouldn't happen.
- fn = c.Criticalf
- }
- fn(format, args...)
-}
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
deleted file mode 100644
index e0c0b214b..000000000
--- a/vendor/google.golang.org/appengine/internal/api_common.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-import (
- "errors"
- "os"
-
- "github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
-)
-
-var errNotAppEngineContext = errors.New("not an App Engine context")
-
-type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
-
-var callOverrideKey = "holds []CallOverrideFunc"
-
-func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
- // We avoid appending to any existing call override
- // so we don't risk overwriting a popped stack below.
- var cofs []CallOverrideFunc
- if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
- cofs = append(cofs, uf...)
- }
- cofs = append(cofs, f)
- return netcontext.WithValue(ctx, &callOverrideKey, cofs)
-}
-
-func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
- cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
- if len(cofs) == 0 {
- return nil, nil, false
- }
- // We found a list of overrides; grab the last, and reconstitute a
- // context that will hide it.
- f := cofs[len(cofs)-1]
- ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
- return f, ctx, true
-}
-
-type logOverrideFunc func(level int64, format string, args ...interface{})
-
-var logOverrideKey = "holds a logOverrideFunc"
-
-func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
- return netcontext.WithValue(ctx, &logOverrideKey, f)
-}
-
-var appIDOverrideKey = "holds a string, being the full app ID"
-
-func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
- return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
-}
-
-var namespaceKey = "holds the namespace string"
-
-func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
- return netcontext.WithValue(ctx, &namespaceKey, ns)
-}
-
-func NamespaceFromContext(ctx netcontext.Context) string {
- // If there's no namespace, return the empty string.
- ns, _ := ctx.Value(&namespaceKey).(string)
- return ns
-}
-
-// FullyQualifiedAppID returns the fully-qualified application ID.
-// This may contain a partition prefix (e.g. "s~" for High Replication apps),
-// or a domain prefix (e.g. "example.com:").
-func FullyQualifiedAppID(ctx netcontext.Context) string {
- if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
- return id
- }
- return fullyQualifiedAppID(ctx)
-}
-
-func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
- if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
- f(level, format, args...)
- return
- }
- c := fromContext(ctx)
- if c == nil {
- panic(errNotAppEngineContext)
- }
- logf(c, level, format, args...)
-}
-
-// NamespacedContext wraps a Context to support namespaces.
-func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
- return withNamespace(ctx, namespace)
-}
-
-// SetTestEnv sets the env variables for testing background ticket in Flex.
-func SetTestEnv() func() {
- var environ = []struct {
- key, value string
- }{
- {"GAE_LONG_APP_ID", "my-app-id"},
- {"GAE_MINOR_VERSION", "067924799508853122"},
- {"GAE_MODULE_INSTANCE", "0"},
- {"GAE_MODULE_NAME", "default"},
- {"GAE_MODULE_VERSION", "20150612t184001"},
- }
-
- for _, v := range environ {
- old := os.Getenv(v.key)
- os.Setenv(v.key, v.value)
- v.value = old
- }
- return func() { // Restore old environment after the test completes.
- for _, v := range environ {
- if v.value == "" {
- os.Unsetenv(v.key)
- continue
- }
- os.Setenv(v.key, v.value)
- }
- }
-}
diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
deleted file mode 100644
index 11df8c07b..000000000
--- a/vendor/google.golang.org/appengine/internal/app_id.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-import (
- "strings"
-)
-
-func parseFullAppID(appid string) (partition, domain, displayID string) {
- if i := strings.Index(appid, "~"); i != -1 {
- partition, appid = appid[:i], appid[i+1:]
- }
- if i := strings.Index(appid, ":"); i != -1 {
- domain, appid = appid[:i], appid[i+1:]
- }
- return partition, domain, appid
-}
-
-// appID returns "appid" or "domain.com:appid".
-func appID(fullAppID string) string {
- _, dom, dis := parseFullAppID(fullAppID)
- if dom != "" {
- return dom + ":" + dis
- }
- return dis
-}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
deleted file mode 100644
index db4777e68..000000000
--- a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
+++ /dev/null
@@ -1,308 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/base/api_base.proto
-
-package base
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type StringProto struct {
- Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StringProto) Reset() { *m = StringProto{} }
-func (m *StringProto) String() string { return proto.CompactTextString(m) }
-func (*StringProto) ProtoMessage() {}
-func (*StringProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{0}
-}
-func (m *StringProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StringProto.Unmarshal(m, b)
-}
-func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StringProto.Marshal(b, m, deterministic)
-}
-func (dst *StringProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StringProto.Merge(dst, src)
-}
-func (m *StringProto) XXX_Size() int {
- return xxx_messageInfo_StringProto.Size(m)
-}
-func (m *StringProto) XXX_DiscardUnknown() {
- xxx_messageInfo_StringProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StringProto proto.InternalMessageInfo
-
-func (m *StringProto) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return ""
-}
-
-type Integer32Proto struct {
- Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
-func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
-func (*Integer32Proto) ProtoMessage() {}
-func (*Integer32Proto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{1}
-}
-func (m *Integer32Proto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Integer32Proto.Unmarshal(m, b)
-}
-func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic)
-}
-func (dst *Integer32Proto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Integer32Proto.Merge(dst, src)
-}
-func (m *Integer32Proto) XXX_Size() int {
- return xxx_messageInfo_Integer32Proto.Size(m)
-}
-func (m *Integer32Proto) XXX_DiscardUnknown() {
- xxx_messageInfo_Integer32Proto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo
-
-func (m *Integer32Proto) GetValue() int32 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type Integer64Proto struct {
- Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
-func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
-func (*Integer64Proto) ProtoMessage() {}
-func (*Integer64Proto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{2}
-}
-func (m *Integer64Proto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Integer64Proto.Unmarshal(m, b)
-}
-func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic)
-}
-func (dst *Integer64Proto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Integer64Proto.Merge(dst, src)
-}
-func (m *Integer64Proto) XXX_Size() int {
- return xxx_messageInfo_Integer64Proto.Size(m)
-}
-func (m *Integer64Proto) XXX_DiscardUnknown() {
- xxx_messageInfo_Integer64Proto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo
-
-func (m *Integer64Proto) GetValue() int64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type BoolProto struct {
- Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *BoolProto) Reset() { *m = BoolProto{} }
-func (m *BoolProto) String() string { return proto.CompactTextString(m) }
-func (*BoolProto) ProtoMessage() {}
-func (*BoolProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{3}
-}
-func (m *BoolProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BoolProto.Unmarshal(m, b)
-}
-func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic)
-}
-func (dst *BoolProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BoolProto.Merge(dst, src)
-}
-func (m *BoolProto) XXX_Size() int {
- return xxx_messageInfo_BoolProto.Size(m)
-}
-func (m *BoolProto) XXX_DiscardUnknown() {
- xxx_messageInfo_BoolProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BoolProto proto.InternalMessageInfo
-
-func (m *BoolProto) GetValue() bool {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return false
-}
-
-type DoubleProto struct {
- Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DoubleProto) Reset() { *m = DoubleProto{} }
-func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
-func (*DoubleProto) ProtoMessage() {}
-func (*DoubleProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{4}
-}
-func (m *DoubleProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DoubleProto.Unmarshal(m, b)
-}
-func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic)
-}
-func (dst *DoubleProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DoubleProto.Merge(dst, src)
-}
-func (m *DoubleProto) XXX_Size() int {
- return xxx_messageInfo_DoubleProto.Size(m)
-}
-func (m *DoubleProto) XXX_DiscardUnknown() {
- xxx_messageInfo_DoubleProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DoubleProto proto.InternalMessageInfo
-
-func (m *DoubleProto) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return 0
-}
-
-type BytesProto struct {
- Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *BytesProto) Reset() { *m = BytesProto{} }
-func (m *BytesProto) String() string { return proto.CompactTextString(m) }
-func (*BytesProto) ProtoMessage() {}
-func (*BytesProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{5}
-}
-func (m *BytesProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BytesProto.Unmarshal(m, b)
-}
-func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic)
-}
-func (dst *BytesProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BytesProto.Merge(dst, src)
-}
-func (m *BytesProto) XXX_Size() int {
- return xxx_messageInfo_BytesProto.Size(m)
-}
-func (m *BytesProto) XXX_DiscardUnknown() {
- xxx_messageInfo_BytesProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BytesProto proto.InternalMessageInfo
-
-func (m *BytesProto) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type VoidProto struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *VoidProto) Reset() { *m = VoidProto{} }
-func (m *VoidProto) String() string { return proto.CompactTextString(m) }
-func (*VoidProto) ProtoMessage() {}
-func (*VoidProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_api_base_9d49f8792e0c1140, []int{6}
-}
-func (m *VoidProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_VoidProto.Unmarshal(m, b)
-}
-func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic)
-}
-func (dst *VoidProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_VoidProto.Merge(dst, src)
-}
-func (m *VoidProto) XXX_Size() int {
- return xxx_messageInfo_VoidProto.Size(m)
-}
-func (m *VoidProto) XXX_DiscardUnknown() {
- xxx_messageInfo_VoidProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_VoidProto proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto")
- proto.RegisterType((*Integer32Proto)(nil), "appengine.base.Integer32Proto")
- proto.RegisterType((*Integer64Proto)(nil), "appengine.base.Integer64Proto")
- proto.RegisterType((*BoolProto)(nil), "appengine.base.BoolProto")
- proto.RegisterType((*DoubleProto)(nil), "appengine.base.DoubleProto")
- proto.RegisterType((*BytesProto)(nil), "appengine.base.BytesProto")
- proto.RegisterType((*VoidProto)(nil), "appengine.base.VoidProto")
-}
-
-func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140)
-}
-
-var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{
- // 199 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30,
- 0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40,
- 0x45, 0x9c, 0x83, 0x8b, 0x9b, 0x28, 0x38, 0xb8, 0x48, 0x8a, 0xc7, 0x11, 0x08, 0xb9, 0x90, 0xa6,
- 0x82, 0xdf, 0x5e, 0xda, 0xd2, 0xfa, 0xc2, 0x9b, 0xed, 0xfe, 0xfc, 0xe0, 0xe1, 0x81, 0x27, 0x62,
- 0x26, 0x8b, 0x82, 0xd8, 0x6a, 0x47, 0x82, 0x03, 0x49, 0xed, 0x3d, 0x3a, 0x32, 0x0e, 0xa5, 0x71,
- 0x11, 0x83, 0xd3, 0x56, 0x0e, 0x7a, 0x44, 0xa9, 0xbd, 0xf9, 0x9a, 0x07, 0xe1, 0x03, 0x47, 0xbe,
- 0x68, 0x76, 0x27, 0xe6, 0x6b, 0xd7, 0x43, 0xfd, 0x1e, 0x83, 0x71, 0xf4, 0xba, 0xbc, 0x2f, 0xa1,
- 0xf8, 0xd1, 0x76, 0xc2, 0x36, 0xbb, 0xca, 0x6f, 0xab, 0xb7, 0x75, 0xe9, 0x6e, 0xa0, 0x79, 0x71,
- 0x11, 0x09, 0xc3, 0xfd, 0x5d, 0xc2, 0x15, 0xc7, 0xee, 0xf1, 0x21, 0xe1, 0x4e, 0x36, 0x77, 0x0d,
- 0x95, 0x62, 0xb6, 0x09, 0x52, 0x6e, 0xa4, 0x87, 0xfa, 0x99, 0xa7, 0xc1, 0x62, 0x02, 0x65, 0xff,
- 0x79, 0xa0, 0x7e, 0x23, 0x8e, 0xab, 0x69, 0x0f, 0xcd, 0xb9, 0xca, 0xcb, 0xdd, 0xd5, 0x50, 0x7d,
- 0xb0, 0xf9, 0x5e, 0x98, 0x3a, 0xfb, 0x3c, 0x9d, 0x9b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xba,
- 0x37, 0x25, 0xea, 0x44, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
deleted file mode 100644
index 56cd7a3ca..000000000
--- a/vendor/google.golang.org/appengine/internal/base/api_base.proto
+++ /dev/null
@@ -1,33 +0,0 @@
-// Built-in base types for API calls. Primarily useful as return types.
-
-syntax = "proto2";
-option go_package = "base";
-
-package appengine.base;
-
-message StringProto {
- required string value = 1;
-}
-
-message Integer32Proto {
- required int32 value = 1;
-}
-
-message Integer64Proto {
- required int64 value = 1;
-}
-
-message BoolProto {
- required bool value = 1;
-}
-
-message DoubleProto {
- required double value = 1;
-}
-
-message BytesProto {
- required bytes value = 1 [ctype=CORD];
-}
-
-message VoidProto {
-}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
deleted file mode 100644
index 2fb748289..000000000
--- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
+++ /dev/null
@@ -1,4367 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
-
-package datastore
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Property_Meaning int32
-
-const (
- Property_NO_MEANING Property_Meaning = 0
- Property_BLOB Property_Meaning = 14
- Property_TEXT Property_Meaning = 15
- Property_BYTESTRING Property_Meaning = 16
- Property_ATOM_CATEGORY Property_Meaning = 1
- Property_ATOM_LINK Property_Meaning = 2
- Property_ATOM_TITLE Property_Meaning = 3
- Property_ATOM_CONTENT Property_Meaning = 4
- Property_ATOM_SUMMARY Property_Meaning = 5
- Property_ATOM_AUTHOR Property_Meaning = 6
- Property_GD_WHEN Property_Meaning = 7
- Property_GD_EMAIL Property_Meaning = 8
- Property_GEORSS_POINT Property_Meaning = 9
- Property_GD_IM Property_Meaning = 10
- Property_GD_PHONENUMBER Property_Meaning = 11
- Property_GD_POSTALADDRESS Property_Meaning = 12
- Property_GD_RATING Property_Meaning = 13
- Property_BLOBKEY Property_Meaning = 17
- Property_ENTITY_PROTO Property_Meaning = 19
- Property_INDEX_VALUE Property_Meaning = 18
-)
-
-var Property_Meaning_name = map[int32]string{
- 0: "NO_MEANING",
- 14: "BLOB",
- 15: "TEXT",
- 16: "BYTESTRING",
- 1: "ATOM_CATEGORY",
- 2: "ATOM_LINK",
- 3: "ATOM_TITLE",
- 4: "ATOM_CONTENT",
- 5: "ATOM_SUMMARY",
- 6: "ATOM_AUTHOR",
- 7: "GD_WHEN",
- 8: "GD_EMAIL",
- 9: "GEORSS_POINT",
- 10: "GD_IM",
- 11: "GD_PHONENUMBER",
- 12: "GD_POSTALADDRESS",
- 13: "GD_RATING",
- 17: "BLOBKEY",
- 19: "ENTITY_PROTO",
- 18: "INDEX_VALUE",
-}
-var Property_Meaning_value = map[string]int32{
- "NO_MEANING": 0,
- "BLOB": 14,
- "TEXT": 15,
- "BYTESTRING": 16,
- "ATOM_CATEGORY": 1,
- "ATOM_LINK": 2,
- "ATOM_TITLE": 3,
- "ATOM_CONTENT": 4,
- "ATOM_SUMMARY": 5,
- "ATOM_AUTHOR": 6,
- "GD_WHEN": 7,
- "GD_EMAIL": 8,
- "GEORSS_POINT": 9,
- "GD_IM": 10,
- "GD_PHONENUMBER": 11,
- "GD_POSTALADDRESS": 12,
- "GD_RATING": 13,
- "BLOBKEY": 17,
- "ENTITY_PROTO": 19,
- "INDEX_VALUE": 18,
-}
-
-func (x Property_Meaning) Enum() *Property_Meaning {
- p := new(Property_Meaning)
- *p = x
- return p
-}
-func (x Property_Meaning) String() string {
- return proto.EnumName(Property_Meaning_name, int32(x))
-}
-func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
- if err != nil {
- return err
- }
- *x = Property_Meaning(value)
- return nil
-}
-func (Property_Meaning) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 0}
-}
-
-type Property_FtsTokenizationOption int32
-
-const (
- Property_HTML Property_FtsTokenizationOption = 1
- Property_ATOM Property_FtsTokenizationOption = 2
-)
-
-var Property_FtsTokenizationOption_name = map[int32]string{
- 1: "HTML",
- 2: "ATOM",
-}
-var Property_FtsTokenizationOption_value = map[string]int32{
- "HTML": 1,
- "ATOM": 2,
-}
-
-func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
- p := new(Property_FtsTokenizationOption)
- *p = x
- return p
-}
-func (x Property_FtsTokenizationOption) String() string {
- return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
-}
-func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
- if err != nil {
- return err
- }
- *x = Property_FtsTokenizationOption(value)
- return nil
-}
-func (Property_FtsTokenizationOption) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 1}
-}
-
-type EntityProto_Kind int32
-
-const (
- EntityProto_GD_CONTACT EntityProto_Kind = 1
- EntityProto_GD_EVENT EntityProto_Kind = 2
- EntityProto_GD_MESSAGE EntityProto_Kind = 3
-)
-
-var EntityProto_Kind_name = map[int32]string{
- 1: "GD_CONTACT",
- 2: "GD_EVENT",
- 3: "GD_MESSAGE",
-}
-var EntityProto_Kind_value = map[string]int32{
- "GD_CONTACT": 1,
- "GD_EVENT": 2,
- "GD_MESSAGE": 3,
-}
-
-func (x EntityProto_Kind) Enum() *EntityProto_Kind {
- p := new(EntityProto_Kind)
- *p = x
- return p
-}
-func (x EntityProto_Kind) String() string {
- return proto.EnumName(EntityProto_Kind_name, int32(x))
-}
-func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
- if err != nil {
- return err
- }
- *x = EntityProto_Kind(value)
- return nil
-}
-func (EntityProto_Kind) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6, 0}
-}
-
-type Index_Property_Direction int32
-
-const (
- Index_Property_ASCENDING Index_Property_Direction = 1
- Index_Property_DESCENDING Index_Property_Direction = 2
-)
-
-var Index_Property_Direction_name = map[int32]string{
- 1: "ASCENDING",
- 2: "DESCENDING",
-}
-var Index_Property_Direction_value = map[string]int32{
- "ASCENDING": 1,
- "DESCENDING": 2,
-}
-
-func (x Index_Property_Direction) Enum() *Index_Property_Direction {
- p := new(Index_Property_Direction)
- *p = x
- return p
-}
-func (x Index_Property_Direction) String() string {
- return proto.EnumName(Index_Property_Direction_name, int32(x))
-}
-func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
- if err != nil {
- return err
- }
- *x = Index_Property_Direction(value)
- return nil
-}
-func (Index_Property_Direction) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0, 0}
-}
-
-type CompositeIndex_State int32
-
-const (
- CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
- CompositeIndex_READ_WRITE CompositeIndex_State = 2
- CompositeIndex_DELETED CompositeIndex_State = 3
- CompositeIndex_ERROR CompositeIndex_State = 4
-)
-
-var CompositeIndex_State_name = map[int32]string{
- 1: "WRITE_ONLY",
- 2: "READ_WRITE",
- 3: "DELETED",
- 4: "ERROR",
-}
-var CompositeIndex_State_value = map[string]int32{
- "WRITE_ONLY": 1,
- "READ_WRITE": 2,
- "DELETED": 3,
- "ERROR": 4,
-}
-
-func (x CompositeIndex_State) Enum() *CompositeIndex_State {
- p := new(CompositeIndex_State)
- *p = x
- return p
-}
-func (x CompositeIndex_State) String() string {
- return proto.EnumName(CompositeIndex_State_name, int32(x))
-}
-func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
- if err != nil {
- return err
- }
- *x = CompositeIndex_State(value)
- return nil
-}
-func (CompositeIndex_State) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9, 0}
-}
-
-type Snapshot_Status int32
-
-const (
- Snapshot_INACTIVE Snapshot_Status = 0
- Snapshot_ACTIVE Snapshot_Status = 1
-)
-
-var Snapshot_Status_name = map[int32]string{
- 0: "INACTIVE",
- 1: "ACTIVE",
-}
-var Snapshot_Status_value = map[string]int32{
- "INACTIVE": 0,
- "ACTIVE": 1,
-}
-
-func (x Snapshot_Status) Enum() *Snapshot_Status {
- p := new(Snapshot_Status)
- *p = x
- return p
-}
-func (x Snapshot_Status) String() string {
- return proto.EnumName(Snapshot_Status_name, int32(x))
-}
-func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
- if err != nil {
- return err
- }
- *x = Snapshot_Status(value)
- return nil
-}
-func (Snapshot_Status) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12, 0}
-}
-
-type Query_Hint int32
-
-const (
- Query_ORDER_FIRST Query_Hint = 1
- Query_ANCESTOR_FIRST Query_Hint = 2
- Query_FILTER_FIRST Query_Hint = 3
-)
-
-var Query_Hint_name = map[int32]string{
- 1: "ORDER_FIRST",
- 2: "ANCESTOR_FIRST",
- 3: "FILTER_FIRST",
-}
-var Query_Hint_value = map[string]int32{
- "ORDER_FIRST": 1,
- "ANCESTOR_FIRST": 2,
- "FILTER_FIRST": 3,
-}
-
-func (x Query_Hint) Enum() *Query_Hint {
- p := new(Query_Hint)
- *p = x
- return p
-}
-func (x Query_Hint) String() string {
- return proto.EnumName(Query_Hint_name, int32(x))
-}
-func (x *Query_Hint) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
- if err != nil {
- return err
- }
- *x = Query_Hint(value)
- return nil
-}
-func (Query_Hint) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
-}
-
-type Query_Filter_Operator int32
-
-const (
- Query_Filter_LESS_THAN Query_Filter_Operator = 1
- Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2
- Query_Filter_GREATER_THAN Query_Filter_Operator = 3
- Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
- Query_Filter_EQUAL Query_Filter_Operator = 5
- Query_Filter_IN Query_Filter_Operator = 6
- Query_Filter_EXISTS Query_Filter_Operator = 7
-)
-
-var Query_Filter_Operator_name = map[int32]string{
- 1: "LESS_THAN",
- 2: "LESS_THAN_OR_EQUAL",
- 3: "GREATER_THAN",
- 4: "GREATER_THAN_OR_EQUAL",
- 5: "EQUAL",
- 6: "IN",
- 7: "EXISTS",
-}
-var Query_Filter_Operator_value = map[string]int32{
- "LESS_THAN": 1,
- "LESS_THAN_OR_EQUAL": 2,
- "GREATER_THAN": 3,
- "GREATER_THAN_OR_EQUAL": 4,
- "EQUAL": 5,
- "IN": 6,
- "EXISTS": 7,
-}
-
-func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
- p := new(Query_Filter_Operator)
- *p = x
- return p
-}
-func (x Query_Filter_Operator) String() string {
- return proto.EnumName(Query_Filter_Operator_name, int32(x))
-}
-func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
- if err != nil {
- return err
- }
- *x = Query_Filter_Operator(value)
- return nil
-}
-func (Query_Filter_Operator) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0, 0}
-}
-
-type Query_Order_Direction int32
-
-const (
- Query_Order_ASCENDING Query_Order_Direction = 1
- Query_Order_DESCENDING Query_Order_Direction = 2
-)
-
-var Query_Order_Direction_name = map[int32]string{
- 1: "ASCENDING",
- 2: "DESCENDING",
-}
-var Query_Order_Direction_value = map[string]int32{
- "ASCENDING": 1,
- "DESCENDING": 2,
-}
-
-func (x Query_Order_Direction) Enum() *Query_Order_Direction {
- p := new(Query_Order_Direction)
- *p = x
- return p
-}
-func (x Query_Order_Direction) String() string {
- return proto.EnumName(Query_Order_Direction_name, int32(x))
-}
-func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
- if err != nil {
- return err
- }
- *x = Query_Order_Direction(value)
- return nil
-}
-func (Query_Order_Direction) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1, 0}
-}
-
-type Error_ErrorCode int32
-
-const (
- Error_BAD_REQUEST Error_ErrorCode = 1
- Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2
- Error_INTERNAL_ERROR Error_ErrorCode = 3
- Error_NEED_INDEX Error_ErrorCode = 4
- Error_TIMEOUT Error_ErrorCode = 5
- Error_PERMISSION_DENIED Error_ErrorCode = 6
- Error_BIGTABLE_ERROR Error_ErrorCode = 7
- Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
- Error_CAPABILITY_DISABLED Error_ErrorCode = 9
- Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10
- Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11
-)
-
-var Error_ErrorCode_name = map[int32]string{
- 1: "BAD_REQUEST",
- 2: "CONCURRENT_TRANSACTION",
- 3: "INTERNAL_ERROR",
- 4: "NEED_INDEX",
- 5: "TIMEOUT",
- 6: "PERMISSION_DENIED",
- 7: "BIGTABLE_ERROR",
- 8: "COMMITTED_BUT_STILL_APPLYING",
- 9: "CAPABILITY_DISABLED",
- 10: "TRY_ALTERNATE_BACKEND",
- 11: "SAFE_TIME_TOO_OLD",
-}
-var Error_ErrorCode_value = map[string]int32{
- "BAD_REQUEST": 1,
- "CONCURRENT_TRANSACTION": 2,
- "INTERNAL_ERROR": 3,
- "NEED_INDEX": 4,
- "TIMEOUT": 5,
- "PERMISSION_DENIED": 6,
- "BIGTABLE_ERROR": 7,
- "COMMITTED_BUT_STILL_APPLYING": 8,
- "CAPABILITY_DISABLED": 9,
- "TRY_ALTERNATE_BACKEND": 10,
- "SAFE_TIME_TOO_OLD": 11,
-}
-
-func (x Error_ErrorCode) Enum() *Error_ErrorCode {
- p := new(Error_ErrorCode)
- *p = x
- return p
-}
-func (x Error_ErrorCode) String() string {
- return proto.EnumName(Error_ErrorCode_name, int32(x))
-}
-func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
- if err != nil {
- return err
- }
- *x = Error_ErrorCode(value)
- return nil
-}
-func (Error_ErrorCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19, 0}
-}
-
-type PutRequest_AutoIdPolicy int32
-
-const (
- PutRequest_CURRENT PutRequest_AutoIdPolicy = 0
- PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
-)
-
-var PutRequest_AutoIdPolicy_name = map[int32]string{
- 0: "CURRENT",
- 1: "SEQUENTIAL",
-}
-var PutRequest_AutoIdPolicy_value = map[string]int32{
- "CURRENT": 0,
- "SEQUENTIAL": 1,
-}
-
-func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
- p := new(PutRequest_AutoIdPolicy)
- *p = x
- return p
-}
-func (x PutRequest_AutoIdPolicy) String() string {
- return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
-}
-func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
- if err != nil {
- return err
- }
- *x = PutRequest_AutoIdPolicy(value)
- return nil
-}
-func (PutRequest_AutoIdPolicy) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23, 0}
-}
-
-type BeginTransactionRequest_TransactionMode int32
-
-const (
- BeginTransactionRequest_UNKNOWN BeginTransactionRequest_TransactionMode = 0
- BeginTransactionRequest_READ_ONLY BeginTransactionRequest_TransactionMode = 1
- BeginTransactionRequest_READ_WRITE BeginTransactionRequest_TransactionMode = 2
-)
-
-var BeginTransactionRequest_TransactionMode_name = map[int32]string{
- 0: "UNKNOWN",
- 1: "READ_ONLY",
- 2: "READ_WRITE",
-}
-var BeginTransactionRequest_TransactionMode_value = map[string]int32{
- "UNKNOWN": 0,
- "READ_ONLY": 1,
- "READ_WRITE": 2,
-}
-
-func (x BeginTransactionRequest_TransactionMode) Enum() *BeginTransactionRequest_TransactionMode {
- p := new(BeginTransactionRequest_TransactionMode)
- *p = x
- return p
-}
-func (x BeginTransactionRequest_TransactionMode) String() string {
- return proto.EnumName(BeginTransactionRequest_TransactionMode_name, int32(x))
-}
-func (x *BeginTransactionRequest_TransactionMode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_TransactionMode_value, data, "BeginTransactionRequest_TransactionMode")
- if err != nil {
- return err
- }
- *x = BeginTransactionRequest_TransactionMode(value)
- return nil
-}
-func (BeginTransactionRequest_TransactionMode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36, 0}
-}
-
-type Action struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Action) Reset() { *m = Action{} }
-func (m *Action) String() string { return proto.CompactTextString(m) }
-func (*Action) ProtoMessage() {}
-func (*Action) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{0}
-}
-func (m *Action) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Action.Unmarshal(m, b)
-}
-func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Action.Marshal(b, m, deterministic)
-}
-func (dst *Action) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Action.Merge(dst, src)
-}
-func (m *Action) XXX_Size() int {
- return xxx_messageInfo_Action.Size(m)
-}
-func (m *Action) XXX_DiscardUnknown() {
- xxx_messageInfo_Action.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Action proto.InternalMessageInfo
-
-type PropertyValue struct {
- Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
- BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
- StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
- DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
- Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue,json=pointvalue" json:"pointvalue,omitempty"`
- Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue,json=uservalue" json:"uservalue,omitempty"`
- Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue,json=referencevalue" json:"referencevalue,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PropertyValue) Reset() { *m = PropertyValue{} }
-func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue) ProtoMessage() {}
-func (*PropertyValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1}
-}
-func (m *PropertyValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PropertyValue.Unmarshal(m, b)
-}
-func (m *PropertyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PropertyValue.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PropertyValue.Merge(dst, src)
-}
-func (m *PropertyValue) XXX_Size() int {
- return xxx_messageInfo_PropertyValue.Size(m)
-}
-func (m *PropertyValue) XXX_DiscardUnknown() {
- xxx_messageInfo_PropertyValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue proto.InternalMessageInfo
-
-func (m *PropertyValue) GetInt64Value() int64 {
- if m != nil && m.Int64Value != nil {
- return *m.Int64Value
- }
- return 0
-}
-
-func (m *PropertyValue) GetBooleanValue() bool {
- if m != nil && m.BooleanValue != nil {
- return *m.BooleanValue
- }
- return false
-}
-
-func (m *PropertyValue) GetStringValue() string {
- if m != nil && m.StringValue != nil {
- return *m.StringValue
- }
- return ""
-}
-
-func (m *PropertyValue) GetDoubleValue() float64 {
- if m != nil && m.DoubleValue != nil {
- return *m.DoubleValue
- }
- return 0
-}
-
-func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
- if m != nil {
- return m.Pointvalue
- }
- return nil
-}
-
-func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
- if m != nil {
- return m.Uservalue
- }
- return nil
-}
-
-func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
- if m != nil {
- return m.Referencevalue
- }
- return nil
-}
-
-type PropertyValue_PointValue struct {
- X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
- Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} }
-func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_PointValue) ProtoMessage() {}
-func (*PropertyValue_PointValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 0}
-}
-func (m *PropertyValue_PointValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PropertyValue_PointValue.Unmarshal(m, b)
-}
-func (m *PropertyValue_PointValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PropertyValue_PointValue.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue_PointValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PropertyValue_PointValue.Merge(dst, src)
-}
-func (m *PropertyValue_PointValue) XXX_Size() int {
- return xxx_messageInfo_PropertyValue_PointValue.Size(m)
-}
-func (m *PropertyValue_PointValue) XXX_DiscardUnknown() {
- xxx_messageInfo_PropertyValue_PointValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue_PointValue proto.InternalMessageInfo
-
-func (m *PropertyValue_PointValue) GetX() float64 {
- if m != nil && m.X != nil {
- return *m.X
- }
- return 0
-}
-
-func (m *PropertyValue_PointValue) GetY() float64 {
- if m != nil && m.Y != nil {
- return *m.Y
- }
- return 0
-}
-
-type PropertyValue_UserValue struct {
- Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
- AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
- Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
- FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
- FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} }
-func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_UserValue) ProtoMessage() {}
-func (*PropertyValue_UserValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 1}
-}
-func (m *PropertyValue_UserValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PropertyValue_UserValue.Unmarshal(m, b)
-}
-func (m *PropertyValue_UserValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PropertyValue_UserValue.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue_UserValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PropertyValue_UserValue.Merge(dst, src)
-}
-func (m *PropertyValue_UserValue) XXX_Size() int {
- return xxx_messageInfo_PropertyValue_UserValue.Size(m)
-}
-func (m *PropertyValue_UserValue) XXX_DiscardUnknown() {
- xxx_messageInfo_PropertyValue_UserValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue_UserValue proto.InternalMessageInfo
-
-func (m *PropertyValue_UserValue) GetEmail() string {
- if m != nil && m.Email != nil {
- return *m.Email
- }
- return ""
-}
-
-func (m *PropertyValue_UserValue) GetAuthDomain() string {
- if m != nil && m.AuthDomain != nil {
- return *m.AuthDomain
- }
- return ""
-}
-
-func (m *PropertyValue_UserValue) GetNickname() string {
- if m != nil && m.Nickname != nil {
- return *m.Nickname
- }
- return ""
-}
-
-func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
- if m != nil && m.FederatedIdentity != nil {
- return *m.FederatedIdentity
- }
- return ""
-}
-
-func (m *PropertyValue_UserValue) GetFederatedProvider() string {
- if m != nil && m.FederatedProvider != nil {
- return *m.FederatedProvider
- }
- return ""
-}
-
-type PropertyValue_ReferenceValue struct {
- App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
- NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
- Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement,json=pathelement" json:"pathelement,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} }
-func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_ReferenceValue) ProtoMessage() {}
-func (*PropertyValue_ReferenceValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2}
-}
-func (m *PropertyValue_ReferenceValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PropertyValue_ReferenceValue.Unmarshal(m, b)
-}
-func (m *PropertyValue_ReferenceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PropertyValue_ReferenceValue.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue_ReferenceValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PropertyValue_ReferenceValue.Merge(dst, src)
-}
-func (m *PropertyValue_ReferenceValue) XXX_Size() int {
- return xxx_messageInfo_PropertyValue_ReferenceValue.Size(m)
-}
-func (m *PropertyValue_ReferenceValue) XXX_DiscardUnknown() {
- xxx_messageInfo_PropertyValue_ReferenceValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue_ReferenceValue proto.InternalMessageInfo
-
-func (m *PropertyValue_ReferenceValue) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
- if m != nil && m.NameSpace != nil {
- return *m.NameSpace
- }
- return ""
-}
-
-func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
- if m != nil {
- return m.Pathelement
- }
- return nil
-}
-
-type PropertyValue_ReferenceValue_PathElement struct {
- Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
- Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
- Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
- *m = PropertyValue_ReferenceValue_PathElement{}
-}
-func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
-func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {}
-func (*PropertyValue_ReferenceValue_PathElement) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2, 0}
-}
-func (m *PropertyValue_ReferenceValue_PathElement) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Unmarshal(m, b)
-}
-func (m *PropertyValue_ReferenceValue_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Marshal(b, m, deterministic)
-}
-func (dst *PropertyValue_ReferenceValue_PathElement) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Merge(dst, src)
-}
-func (m *PropertyValue_ReferenceValue_PathElement) XXX_Size() int {
- return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Size(m)
-}
-func (m *PropertyValue_ReferenceValue_PathElement) XXX_DiscardUnknown() {
- xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PropertyValue_ReferenceValue_PathElement proto.InternalMessageInfo
-
-func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return ""
-}
-
-func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
- if m != nil && m.Id != nil {
- return *m.Id
- }
- return 0
-}
-
-func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-type Property struct {
- Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
- MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri,json=meaningUri" json:"meaning_uri,omitempty"`
- Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
- Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
- Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
- Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
- FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,json=ftsTokenizationOption,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
- Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Property) Reset() { *m = Property{} }
-func (m *Property) String() string { return proto.CompactTextString(m) }
-func (*Property) ProtoMessage() {}
-func (*Property) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2}
-}
-func (m *Property) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Property.Unmarshal(m, b)
-}
-func (m *Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Property.Marshal(b, m, deterministic)
-}
-func (dst *Property) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Property.Merge(dst, src)
-}
-func (m *Property) XXX_Size() int {
- return xxx_messageInfo_Property.Size(m)
-}
-func (m *Property) XXX_DiscardUnknown() {
- xxx_messageInfo_Property.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Property proto.InternalMessageInfo
-
-const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
-const Default_Property_Searchable bool = false
-const Default_Property_Locale string = "en"
-
-func (m *Property) GetMeaning() Property_Meaning {
- if m != nil && m.Meaning != nil {
- return *m.Meaning
- }
- return Default_Property_Meaning
-}
-
-func (m *Property) GetMeaningUri() string {
- if m != nil && m.MeaningUri != nil {
- return *m.MeaningUri
- }
- return ""
-}
-
-func (m *Property) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *Property) GetValue() *PropertyValue {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *Property) GetMultiple() bool {
- if m != nil && m.Multiple != nil {
- return *m.Multiple
- }
- return false
-}
-
-func (m *Property) GetSearchable() bool {
- if m != nil && m.Searchable != nil {
- return *m.Searchable
- }
- return Default_Property_Searchable
-}
-
-func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
- if m != nil && m.FtsTokenizationOption != nil {
- return *m.FtsTokenizationOption
- }
- return Property_HTML
-}
-
-func (m *Property) GetLocale() string {
- if m != nil && m.Locale != nil {
- return *m.Locale
- }
- return Default_Property_Locale
-}
-
-type Path struct {
- Element []*Path_Element `protobuf:"group,1,rep,name=Element,json=element" json:"element,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Path) Reset() { *m = Path{} }
-func (m *Path) String() string { return proto.CompactTextString(m) }
-func (*Path) ProtoMessage() {}
-func (*Path) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3}
-}
-func (m *Path) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Path.Unmarshal(m, b)
-}
-func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Path.Marshal(b, m, deterministic)
-}
-func (dst *Path) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Path.Merge(dst, src)
-}
-func (m *Path) XXX_Size() int {
- return xxx_messageInfo_Path.Size(m)
-}
-func (m *Path) XXX_DiscardUnknown() {
- xxx_messageInfo_Path.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Path proto.InternalMessageInfo
-
-func (m *Path) GetElement() []*Path_Element {
- if m != nil {
- return m.Element
- }
- return nil
-}
-
-type Path_Element struct {
- Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
- Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
- Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Path_Element) Reset() { *m = Path_Element{} }
-func (m *Path_Element) String() string { return proto.CompactTextString(m) }
-func (*Path_Element) ProtoMessage() {}
-func (*Path_Element) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3, 0}
-}
-func (m *Path_Element) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Path_Element.Unmarshal(m, b)
-}
-func (m *Path_Element) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Path_Element.Marshal(b, m, deterministic)
-}
-func (dst *Path_Element) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Path_Element.Merge(dst, src)
-}
-func (m *Path_Element) XXX_Size() int {
- return xxx_messageInfo_Path_Element.Size(m)
-}
-func (m *Path_Element) XXX_DiscardUnknown() {
- xxx_messageInfo_Path_Element.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Path_Element proto.InternalMessageInfo
-
-func (m *Path_Element) GetType() string {
- if m != nil && m.Type != nil {
- return *m.Type
- }
- return ""
-}
-
-func (m *Path_Element) GetId() int64 {
- if m != nil && m.Id != nil {
- return *m.Id
- }
- return 0
-}
-
-func (m *Path_Element) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-type Reference struct {
- App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
- NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
- Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Reference) Reset() { *m = Reference{} }
-func (m *Reference) String() string { return proto.CompactTextString(m) }
-func (*Reference) ProtoMessage() {}
-func (*Reference) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{4}
-}
-func (m *Reference) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Reference.Unmarshal(m, b)
-}
-func (m *Reference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Reference.Marshal(b, m, deterministic)
-}
-func (dst *Reference) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Reference.Merge(dst, src)
-}
-func (m *Reference) XXX_Size() int {
- return xxx_messageInfo_Reference.Size(m)
-}
-func (m *Reference) XXX_DiscardUnknown() {
- xxx_messageInfo_Reference.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Reference proto.InternalMessageInfo
-
-func (m *Reference) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *Reference) GetNameSpace() string {
- if m != nil && m.NameSpace != nil {
- return *m.NameSpace
- }
- return ""
-}
-
-func (m *Reference) GetPath() *Path {
- if m != nil {
- return m.Path
- }
- return nil
-}
-
-type User struct {
- Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
- AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"`
- Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
- FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"`
- FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *User) Reset() { *m = User{} }
-func (m *User) String() string { return proto.CompactTextString(m) }
-func (*User) ProtoMessage() {}
-func (*User) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{5}
-}
-func (m *User) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_User.Unmarshal(m, b)
-}
-func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_User.Marshal(b, m, deterministic)
-}
-func (dst *User) XXX_Merge(src proto.Message) {
- xxx_messageInfo_User.Merge(dst, src)
-}
-func (m *User) XXX_Size() int {
- return xxx_messageInfo_User.Size(m)
-}
-func (m *User) XXX_DiscardUnknown() {
- xxx_messageInfo_User.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_User proto.InternalMessageInfo
-
-func (m *User) GetEmail() string {
- if m != nil && m.Email != nil {
- return *m.Email
- }
- return ""
-}
-
-func (m *User) GetAuthDomain() string {
- if m != nil && m.AuthDomain != nil {
- return *m.AuthDomain
- }
- return ""
-}
-
-func (m *User) GetNickname() string {
- if m != nil && m.Nickname != nil {
- return *m.Nickname
- }
- return ""
-}
-
-func (m *User) GetFederatedIdentity() string {
- if m != nil && m.FederatedIdentity != nil {
- return *m.FederatedIdentity
- }
- return ""
-}
-
-func (m *User) GetFederatedProvider() string {
- if m != nil && m.FederatedProvider != nil {
- return *m.FederatedProvider
- }
- return ""
-}
-
-type EntityProto struct {
- Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
- EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group,json=entityGroup" json:"entity_group,omitempty"`
- Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
- Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
- KindUri *string `protobuf:"bytes,5,opt,name=kind_uri,json=kindUri" json:"kind_uri,omitempty"`
- Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
- RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property,json=rawProperty" json:"raw_property,omitempty"`
- Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EntityProto) Reset() { *m = EntityProto{} }
-func (m *EntityProto) String() string { return proto.CompactTextString(m) }
-func (*EntityProto) ProtoMessage() {}
-func (*EntityProto) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6}
-}
-func (m *EntityProto) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_EntityProto.Unmarshal(m, b)
-}
-func (m *EntityProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_EntityProto.Marshal(b, m, deterministic)
-}
-func (dst *EntityProto) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EntityProto.Merge(dst, src)
-}
-func (m *EntityProto) XXX_Size() int {
- return xxx_messageInfo_EntityProto.Size(m)
-}
-func (m *EntityProto) XXX_DiscardUnknown() {
- xxx_messageInfo_EntityProto.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EntityProto proto.InternalMessageInfo
-
-func (m *EntityProto) GetKey() *Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *EntityProto) GetEntityGroup() *Path {
- if m != nil {
- return m.EntityGroup
- }
- return nil
-}
-
-func (m *EntityProto) GetOwner() *User {
- if m != nil {
- return m.Owner
- }
- return nil
-}
-
-func (m *EntityProto) GetKind() EntityProto_Kind {
- if m != nil && m.Kind != nil {
- return *m.Kind
- }
- return EntityProto_GD_CONTACT
-}
-
-func (m *EntityProto) GetKindUri() string {
- if m != nil && m.KindUri != nil {
- return *m.KindUri
- }
- return ""
-}
-
-func (m *EntityProto) GetProperty() []*Property {
- if m != nil {
- return m.Property
- }
- return nil
-}
-
-func (m *EntityProto) GetRawProperty() []*Property {
- if m != nil {
- return m.RawProperty
- }
- return nil
-}
-
-func (m *EntityProto) GetRank() int32 {
- if m != nil && m.Rank != nil {
- return *m.Rank
- }
- return 0
-}
-
-type CompositeProperty struct {
- IndexId *int64 `protobuf:"varint,1,req,name=index_id,json=indexId" json:"index_id,omitempty"`
- Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompositeProperty) Reset() { *m = CompositeProperty{} }
-func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
-func (*CompositeProperty) ProtoMessage() {}
-func (*CompositeProperty) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{7}
-}
-func (m *CompositeProperty) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompositeProperty.Unmarshal(m, b)
-}
-func (m *CompositeProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompositeProperty.Marshal(b, m, deterministic)
-}
-func (dst *CompositeProperty) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompositeProperty.Merge(dst, src)
-}
-func (m *CompositeProperty) XXX_Size() int {
- return xxx_messageInfo_CompositeProperty.Size(m)
-}
-func (m *CompositeProperty) XXX_DiscardUnknown() {
- xxx_messageInfo_CompositeProperty.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompositeProperty proto.InternalMessageInfo
-
-func (m *CompositeProperty) GetIndexId() int64 {
- if m != nil && m.IndexId != nil {
- return *m.IndexId
- }
- return 0
-}
-
-func (m *CompositeProperty) GetValue() []string {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type Index struct {
- EntityType *string `protobuf:"bytes,1,req,name=entity_type,json=entityType" json:"entity_type,omitempty"`
- Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
- Property []*Index_Property `protobuf:"group,2,rep,name=Property,json=property" json:"property,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Index) Reset() { *m = Index{} }
-func (m *Index) String() string { return proto.CompactTextString(m) }
-func (*Index) ProtoMessage() {}
-func (*Index) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8}
-}
-func (m *Index) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Index.Unmarshal(m, b)
-}
-func (m *Index) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Index.Marshal(b, m, deterministic)
-}
-func (dst *Index) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Index.Merge(dst, src)
-}
-func (m *Index) XXX_Size() int {
- return xxx_messageInfo_Index.Size(m)
-}
-func (m *Index) XXX_DiscardUnknown() {
- xxx_messageInfo_Index.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Index proto.InternalMessageInfo
-
-func (m *Index) GetEntityType() string {
- if m != nil && m.EntityType != nil {
- return *m.EntityType
- }
- return ""
-}
-
-func (m *Index) GetAncestor() bool {
- if m != nil && m.Ancestor != nil {
- return *m.Ancestor
- }
- return false
-}
-
-func (m *Index) GetProperty() []*Index_Property {
- if m != nil {
- return m.Property
- }
- return nil
-}
-
-type Index_Property struct {
- Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
- Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Index_Property) Reset() { *m = Index_Property{} }
-func (m *Index_Property) String() string { return proto.CompactTextString(m) }
-func (*Index_Property) ProtoMessage() {}
-func (*Index_Property) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0}
-}
-func (m *Index_Property) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Index_Property.Unmarshal(m, b)
-}
-func (m *Index_Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Index_Property.Marshal(b, m, deterministic)
-}
-func (dst *Index_Property) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Index_Property.Merge(dst, src)
-}
-func (m *Index_Property) XXX_Size() int {
- return xxx_messageInfo_Index_Property.Size(m)
-}
-func (m *Index_Property) XXX_DiscardUnknown() {
- xxx_messageInfo_Index_Property.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Index_Property proto.InternalMessageInfo
-
-const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
-
-func (m *Index_Property) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
- }
- return ""
-}
-
-func (m *Index_Property) GetDirection() Index_Property_Direction {
- if m != nil && m.Direction != nil {
- return *m.Direction
- }
- return Default_Index_Property_Direction
-}
-
-type CompositeIndex struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
- Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
- Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
- State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
- OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,json=onlyUseIfRequired,def=0" json:"only_use_if_required,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompositeIndex) Reset() { *m = CompositeIndex{} }
-func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
-func (*CompositeIndex) ProtoMessage() {}
-func (*CompositeIndex) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9}
-}
-func (m *CompositeIndex) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompositeIndex.Unmarshal(m, b)
-}
-func (m *CompositeIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompositeIndex.Marshal(b, m, deterministic)
-}
-func (dst *CompositeIndex) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompositeIndex.Merge(dst, src)
-}
-func (m *CompositeIndex) XXX_Size() int {
- return xxx_messageInfo_CompositeIndex.Size(m)
-}
-func (m *CompositeIndex) XXX_DiscardUnknown() {
- xxx_messageInfo_CompositeIndex.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompositeIndex proto.InternalMessageInfo
-
-const Default_CompositeIndex_OnlyUseIfRequired bool = false
-
-func (m *CompositeIndex) GetAppId() string {
- if m != nil && m.AppId != nil {
- return *m.AppId
- }
- return ""
-}
-
-func (m *CompositeIndex) GetId() int64 {
- if m != nil && m.Id != nil {
- return *m.Id
- }
- return 0
-}
-
-func (m *CompositeIndex) GetDefinition() *Index {
- if m != nil {
- return m.Definition
- }
- return nil
-}
-
-func (m *CompositeIndex) GetState() CompositeIndex_State {
- if m != nil && m.State != nil {
- return *m.State
- }
- return CompositeIndex_WRITE_ONLY
-}
-
-func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
- if m != nil && m.OnlyUseIfRequired != nil {
- return *m.OnlyUseIfRequired
- }
- return Default_CompositeIndex_OnlyUseIfRequired
-}
-
-type IndexPostfix struct {
- IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value,json=indexValue" json:"index_value,omitempty"`
- Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
- Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *IndexPostfix) Reset() { *m = IndexPostfix{} }
-func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
-func (*IndexPostfix) ProtoMessage() {}
-func (*IndexPostfix) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10}
-}
-func (m *IndexPostfix) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_IndexPostfix.Unmarshal(m, b)
-}
-func (m *IndexPostfix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_IndexPostfix.Marshal(b, m, deterministic)
-}
-func (dst *IndexPostfix) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IndexPostfix.Merge(dst, src)
-}
-func (m *IndexPostfix) XXX_Size() int {
- return xxx_messageInfo_IndexPostfix.Size(m)
-}
-func (m *IndexPostfix) XXX_DiscardUnknown() {
- xxx_messageInfo_IndexPostfix.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IndexPostfix proto.InternalMessageInfo
-
-const Default_IndexPostfix_Before bool = true
-
-func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
- if m != nil {
- return m.IndexValue
- }
- return nil
-}
-
-func (m *IndexPostfix) GetKey() *Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *IndexPostfix) GetBefore() bool {
- if m != nil && m.Before != nil {
- return *m.Before
- }
- return Default_IndexPostfix_Before
-}
-
-type IndexPostfix_IndexValue struct {
- PropertyName *string `protobuf:"bytes,1,req,name=property_name,json=propertyName" json:"property_name,omitempty"`
- Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} }
-func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
-func (*IndexPostfix_IndexValue) ProtoMessage() {}
-func (*IndexPostfix_IndexValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10, 0}
-}
-func (m *IndexPostfix_IndexValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_IndexPostfix_IndexValue.Unmarshal(m, b)
-}
-func (m *IndexPostfix_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_IndexPostfix_IndexValue.Marshal(b, m, deterministic)
-}
-func (dst *IndexPostfix_IndexValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IndexPostfix_IndexValue.Merge(dst, src)
-}
-func (m *IndexPostfix_IndexValue) XXX_Size() int {
- return xxx_messageInfo_IndexPostfix_IndexValue.Size(m)
-}
-func (m *IndexPostfix_IndexValue) XXX_DiscardUnknown() {
- xxx_messageInfo_IndexPostfix_IndexValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IndexPostfix_IndexValue proto.InternalMessageInfo
-
-func (m *IndexPostfix_IndexValue) GetPropertyName() string {
- if m != nil && m.PropertyName != nil {
- return *m.PropertyName
- }
- return ""
-}
-
-func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type IndexPosition struct {
- Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
- Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *IndexPosition) Reset() { *m = IndexPosition{} }
-func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
-func (*IndexPosition) ProtoMessage() {}
-func (*IndexPosition) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{11}
-}
-func (m *IndexPosition) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_IndexPosition.Unmarshal(m, b)
-}
-func (m *IndexPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_IndexPosition.Marshal(b, m, deterministic)
-}
-func (dst *IndexPosition) XXX_Merge(src proto.Message) {
- xxx_messageInfo_IndexPosition.Merge(dst, src)
-}
-func (m *IndexPosition) XXX_Size() int {
- return xxx_messageInfo_IndexPosition.Size(m)
-}
-func (m *IndexPosition) XXX_DiscardUnknown() {
- xxx_messageInfo_IndexPosition.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_IndexPosition proto.InternalMessageInfo
-
-const Default_IndexPosition_Before bool = true
-
-func (m *IndexPosition) GetKey() string {
- if m != nil && m.Key != nil {
- return *m.Key
- }
- return ""
-}
-
-func (m *IndexPosition) GetBefore() bool {
- if m != nil && m.Before != nil {
- return *m.Before
- }
- return Default_IndexPosition_Before
-}
-
-type Snapshot struct {
- Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Snapshot) Reset() { *m = Snapshot{} }
-func (m *Snapshot) String() string { return proto.CompactTextString(m) }
-func (*Snapshot) ProtoMessage() {}
-func (*Snapshot) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12}
-}
-func (m *Snapshot) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Snapshot.Unmarshal(m, b)
-}
-func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
-}
-func (dst *Snapshot) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Snapshot.Merge(dst, src)
-}
-func (m *Snapshot) XXX_Size() int {
- return xxx_messageInfo_Snapshot.Size(m)
-}
-func (m *Snapshot) XXX_DiscardUnknown() {
- xxx_messageInfo_Snapshot.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Snapshot proto.InternalMessageInfo
-
-func (m *Snapshot) GetTs() int64 {
- if m != nil && m.Ts != nil {
- return *m.Ts
- }
- return 0
-}
-
-type InternalHeader struct {
- Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *InternalHeader) Reset() { *m = InternalHeader{} }
-func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
-func (*InternalHeader) ProtoMessage() {}
-func (*InternalHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{13}
-}
-func (m *InternalHeader) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_InternalHeader.Unmarshal(m, b)
-}
-func (m *InternalHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_InternalHeader.Marshal(b, m, deterministic)
-}
-func (dst *InternalHeader) XXX_Merge(src proto.Message) {
- xxx_messageInfo_InternalHeader.Merge(dst, src)
-}
-func (m *InternalHeader) XXX_Size() int {
- return xxx_messageInfo_InternalHeader.Size(m)
-}
-func (m *InternalHeader) XXX_DiscardUnknown() {
- xxx_messageInfo_InternalHeader.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InternalHeader proto.InternalMessageInfo
-
-func (m *InternalHeader) GetQos() string {
- if m != nil && m.Qos != nil {
- return *m.Qos
- }
- return ""
-}
-
-type Transaction struct {
- Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
- Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
- App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
- MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Transaction) Reset() { *m = Transaction{} }
-func (m *Transaction) String() string { return proto.CompactTextString(m) }
-func (*Transaction) ProtoMessage() {}
-func (*Transaction) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{14}
-}
-func (m *Transaction) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Transaction.Unmarshal(m, b)
-}
-func (m *Transaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Transaction.Marshal(b, m, deterministic)
-}
-func (dst *Transaction) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Transaction.Merge(dst, src)
-}
-func (m *Transaction) XXX_Size() int {
- return xxx_messageInfo_Transaction.Size(m)
-}
-func (m *Transaction) XXX_DiscardUnknown() {
- xxx_messageInfo_Transaction.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Transaction proto.InternalMessageInfo
-
-const Default_Transaction_MarkChanges bool = false
-
-func (m *Transaction) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *Transaction) GetHandle() uint64 {
- if m != nil && m.Handle != nil {
- return *m.Handle
- }
- return 0
-}
-
-func (m *Transaction) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *Transaction) GetMarkChanges() bool {
- if m != nil && m.MarkChanges != nil {
- return *m.MarkChanges
- }
- return Default_Transaction_MarkChanges
-}
-
-type Query struct {
- Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
- App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
- NameSpace *string `protobuf:"bytes,29,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"`
- Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
- Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
- Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter,json=filter" json:"filter,omitempty"`
- SearchQuery *string `protobuf:"bytes,8,opt,name=search_query,json=searchQuery" json:"search_query,omitempty"`
- Order []*Query_Order `protobuf:"group,9,rep,name=Order,json=order" json:"order,omitempty"`
- Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
- Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
- Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
- Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
- CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
- EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor,json=endCompiledCursor" json:"end_compiled_cursor,omitempty"`
- CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
- RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,json=requirePerfectPlan,def=0" json:"require_perfect_plan,omitempty"`
- KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,json=keysOnly,def=0" json:"keys_only,omitempty"`
- Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
- Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
- FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
- Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
- PropertyName []string `protobuf:"bytes,33,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
- GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name,json=groupByPropertyName" json:"group_by_property_name,omitempty"`
- Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
- MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds,json=minSafeTimeSeconds" json:"min_safe_time_seconds,omitempty"`
- SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name,json=safeReplicaName" json:"safe_replica_name,omitempty"`
- PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,json=persistOffset,def=0" json:"persist_offset,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Query) Reset() { *m = Query{} }
-func (m *Query) String() string { return proto.CompactTextString(m) }
-func (*Query) ProtoMessage() {}
-func (*Query) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15}
-}
-func (m *Query) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Query.Unmarshal(m, b)
-}
-func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Query.Marshal(b, m, deterministic)
-}
-func (dst *Query) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Query.Merge(dst, src)
-}
-func (m *Query) XXX_Size() int {
- return xxx_messageInfo_Query.Size(m)
-}
-func (m *Query) XXX_DiscardUnknown() {
- xxx_messageInfo_Query.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Query proto.InternalMessageInfo
-
-const Default_Query_Offset int32 = 0
-const Default_Query_RequirePerfectPlan bool = false
-const Default_Query_KeysOnly bool = false
-const Default_Query_Compile bool = false
-const Default_Query_PersistOffset bool = false
-
-func (m *Query) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *Query) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *Query) GetNameSpace() string {
- if m != nil && m.NameSpace != nil {
- return *m.NameSpace
- }
- return ""
-}
-
-func (m *Query) GetKind() string {
- if m != nil && m.Kind != nil {
- return *m.Kind
- }
- return ""
-}
-
-func (m *Query) GetAncestor() *Reference {
- if m != nil {
- return m.Ancestor
- }
- return nil
-}
-
-func (m *Query) GetFilter() []*Query_Filter {
- if m != nil {
- return m.Filter
- }
- return nil
-}
-
-func (m *Query) GetSearchQuery() string {
- if m != nil && m.SearchQuery != nil {
- return *m.SearchQuery
- }
- return ""
-}
-
-func (m *Query) GetOrder() []*Query_Order {
- if m != nil {
- return m.Order
- }
- return nil
-}
-
-func (m *Query) GetHint() Query_Hint {
- if m != nil && m.Hint != nil {
- return *m.Hint
- }
- return Query_ORDER_FIRST
-}
-
-func (m *Query) GetCount() int32 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *Query) GetOffset() int32 {
- if m != nil && m.Offset != nil {
- return *m.Offset
- }
- return Default_Query_Offset
-}
-
-func (m *Query) GetLimit() int32 {
- if m != nil && m.Limit != nil {
- return *m.Limit
- }
- return 0
-}
-
-func (m *Query) GetCompiledCursor() *CompiledCursor {
- if m != nil {
- return m.CompiledCursor
- }
- return nil
-}
-
-func (m *Query) GetEndCompiledCursor() *CompiledCursor {
- if m != nil {
- return m.EndCompiledCursor
- }
- return nil
-}
-
-func (m *Query) GetCompositeIndex() []*CompositeIndex {
- if m != nil {
- return m.CompositeIndex
- }
- return nil
-}
-
-func (m *Query) GetRequirePerfectPlan() bool {
- if m != nil && m.RequirePerfectPlan != nil {
- return *m.RequirePerfectPlan
- }
- return Default_Query_RequirePerfectPlan
-}
-
-func (m *Query) GetKeysOnly() bool {
- if m != nil && m.KeysOnly != nil {
- return *m.KeysOnly
- }
- return Default_Query_KeysOnly
-}
-
-func (m *Query) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *Query) GetCompile() bool {
- if m != nil && m.Compile != nil {
- return *m.Compile
- }
- return Default_Query_Compile
-}
-
-func (m *Query) GetFailoverMs() int64 {
- if m != nil && m.FailoverMs != nil {
- return *m.FailoverMs
- }
- return 0
-}
-
-func (m *Query) GetStrong() bool {
- if m != nil && m.Strong != nil {
- return *m.Strong
- }
- return false
-}
-
-func (m *Query) GetPropertyName() []string {
- if m != nil {
- return m.PropertyName
- }
- return nil
-}
-
-func (m *Query) GetGroupByPropertyName() []string {
- if m != nil {
- return m.GroupByPropertyName
- }
- return nil
-}
-
-func (m *Query) GetDistinct() bool {
- if m != nil && m.Distinct != nil {
- return *m.Distinct
- }
- return false
-}
-
-func (m *Query) GetMinSafeTimeSeconds() int64 {
- if m != nil && m.MinSafeTimeSeconds != nil {
- return *m.MinSafeTimeSeconds
- }
- return 0
-}
-
-func (m *Query) GetSafeReplicaName() []string {
- if m != nil {
- return m.SafeReplicaName
- }
- return nil
-}
-
-func (m *Query) GetPersistOffset() bool {
- if m != nil && m.PersistOffset != nil {
- return *m.PersistOffset
- }
- return Default_Query_PersistOffset
-}
-
-type Query_Filter struct {
- Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
- Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Query_Filter) Reset() { *m = Query_Filter{} }
-func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
-func (*Query_Filter) ProtoMessage() {}
-func (*Query_Filter) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0}
-}
-func (m *Query_Filter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Query_Filter.Unmarshal(m, b)
-}
-func (m *Query_Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Query_Filter.Marshal(b, m, deterministic)
-}
-func (dst *Query_Filter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Query_Filter.Merge(dst, src)
-}
-func (m *Query_Filter) XXX_Size() int {
- return xxx_messageInfo_Query_Filter.Size(m)
-}
-func (m *Query_Filter) XXX_DiscardUnknown() {
- xxx_messageInfo_Query_Filter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Query_Filter proto.InternalMessageInfo
-
-func (m *Query_Filter) GetOp() Query_Filter_Operator {
- if m != nil && m.Op != nil {
- return *m.Op
- }
- return Query_Filter_LESS_THAN
-}
-
-func (m *Query_Filter) GetProperty() []*Property {
- if m != nil {
- return m.Property
- }
- return nil
-}
-
-type Query_Order struct {
- Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
- Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Query_Order) Reset() { *m = Query_Order{} }
-func (m *Query_Order) String() string { return proto.CompactTextString(m) }
-func (*Query_Order) ProtoMessage() {}
-func (*Query_Order) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1}
-}
-func (m *Query_Order) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Query_Order.Unmarshal(m, b)
-}
-func (m *Query_Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Query_Order.Marshal(b, m, deterministic)
-}
-func (dst *Query_Order) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Query_Order.Merge(dst, src)
-}
-func (m *Query_Order) XXX_Size() int {
- return xxx_messageInfo_Query_Order.Size(m)
-}
-func (m *Query_Order) XXX_DiscardUnknown() {
- xxx_messageInfo_Query_Order.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Query_Order proto.InternalMessageInfo
-
-const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
-
-func (m *Query_Order) GetProperty() string {
- if m != nil && m.Property != nil {
- return *m.Property
- }
- return ""
-}
-
-func (m *Query_Order) GetDirection() Query_Order_Direction {
- if m != nil && m.Direction != nil {
- return *m.Direction
- }
- return Default_Query_Order_Direction
-}
-
-type CompiledQuery struct {
- Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan,json=primaryscan" json:"primaryscan,omitempty"`
- Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan,json=mergejoinscan" json:"mergejoinscan,omitempty"`
- IndexDef *Index `protobuf:"bytes,21,opt,name=index_def,json=indexDef" json:"index_def,omitempty"`
- Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
- Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
- KeysOnly *bool `protobuf:"varint,12,req,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
- PropertyName []string `protobuf:"bytes,24,rep,name=property_name,json=propertyName" json:"property_name,omitempty"`
- DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size,json=distinctInfixSize" json:"distinct_infix_size,omitempty"`
- Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter,json=entityfilter" json:"entityfilter,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledQuery) Reset() { *m = CompiledQuery{} }
-func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery) ProtoMessage() {}
-func (*CompiledQuery) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16}
-}
-func (m *CompiledQuery) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledQuery.Unmarshal(m, b)
-}
-func (m *CompiledQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledQuery.Marshal(b, m, deterministic)
-}
-func (dst *CompiledQuery) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledQuery.Merge(dst, src)
-}
-func (m *CompiledQuery) XXX_Size() int {
- return xxx_messageInfo_CompiledQuery.Size(m)
-}
-func (m *CompiledQuery) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledQuery.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledQuery proto.InternalMessageInfo
-
-const Default_CompiledQuery_Offset int32 = 0
-
-func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
- if m != nil {
- return m.Primaryscan
- }
- return nil
-}
-
-func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
- if m != nil {
- return m.Mergejoinscan
- }
- return nil
-}
-
-func (m *CompiledQuery) GetIndexDef() *Index {
- if m != nil {
- return m.IndexDef
- }
- return nil
-}
-
-func (m *CompiledQuery) GetOffset() int32 {
- if m != nil && m.Offset != nil {
- return *m.Offset
- }
- return Default_CompiledQuery_Offset
-}
-
-func (m *CompiledQuery) GetLimit() int32 {
- if m != nil && m.Limit != nil {
- return *m.Limit
- }
- return 0
-}
-
-func (m *CompiledQuery) GetKeysOnly() bool {
- if m != nil && m.KeysOnly != nil {
- return *m.KeysOnly
- }
- return false
-}
-
-func (m *CompiledQuery) GetPropertyName() []string {
- if m != nil {
- return m.PropertyName
- }
- return nil
-}
-
-func (m *CompiledQuery) GetDistinctInfixSize() int32 {
- if m != nil && m.DistinctInfixSize != nil {
- return *m.DistinctInfixSize
- }
- return 0
-}
-
-func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
- if m != nil {
- return m.Entityfilter
- }
- return nil
-}
-
-type CompiledQuery_PrimaryScan struct {
- IndexName *string `protobuf:"bytes,2,opt,name=index_name,json=indexName" json:"index_name,omitempty"`
- StartKey *string `protobuf:"bytes,3,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
- StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive,json=startInclusive" json:"start_inclusive,omitempty"`
- EndKey *string `protobuf:"bytes,5,opt,name=end_key,json=endKey" json:"end_key,omitempty"`
- EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive,json=endInclusive" json:"end_inclusive,omitempty"`
- StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value,json=startPostfixValue" json:"start_postfix_value,omitempty"`
- EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value,json=endPostfixValue" json:"end_postfix_value,omitempty"`
- EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us,json=endUnappliedLogTimestampUs" json:"end_unapplied_log_timestamp_us,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} }
-func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_PrimaryScan) ProtoMessage() {}
-func (*CompiledQuery_PrimaryScan) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 0}
-}
-func (m *CompiledQuery_PrimaryScan) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledQuery_PrimaryScan.Unmarshal(m, b)
-}
-func (m *CompiledQuery_PrimaryScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledQuery_PrimaryScan.Marshal(b, m, deterministic)
-}
-func (dst *CompiledQuery_PrimaryScan) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledQuery_PrimaryScan.Merge(dst, src)
-}
-func (m *CompiledQuery_PrimaryScan) XXX_Size() int {
- return xxx_messageInfo_CompiledQuery_PrimaryScan.Size(m)
-}
-func (m *CompiledQuery_PrimaryScan) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledQuery_PrimaryScan.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledQuery_PrimaryScan proto.InternalMessageInfo
-
-func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
- if m != nil && m.IndexName != nil {
- return *m.IndexName
- }
- return ""
-}
-
-func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
- if m != nil && m.StartKey != nil {
- return *m.StartKey
- }
- return ""
-}
-
-func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
- if m != nil && m.StartInclusive != nil {
- return *m.StartInclusive
- }
- return false
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
- if m != nil && m.EndKey != nil {
- return *m.EndKey
- }
- return ""
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
- if m != nil && m.EndInclusive != nil {
- return *m.EndInclusive
- }
- return false
-}
-
-func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
- if m != nil {
- return m.StartPostfixValue
- }
- return nil
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
- if m != nil {
- return m.EndPostfixValue
- }
- return nil
-}
-
-func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
- if m != nil && m.EndUnappliedLogTimestampUs != nil {
- return *m.EndUnappliedLogTimestampUs
- }
- return 0
-}
-
-type CompiledQuery_MergeJoinScan struct {
- IndexName *string `protobuf:"bytes,8,req,name=index_name,json=indexName" json:"index_name,omitempty"`
- PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value,json=prefixValue" json:"prefix_value,omitempty"`
- ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,json=valuePrefix,def=0" json:"value_prefix,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} }
-func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_MergeJoinScan) ProtoMessage() {}
-func (*CompiledQuery_MergeJoinScan) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 1}
-}
-func (m *CompiledQuery_MergeJoinScan) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledQuery_MergeJoinScan.Unmarshal(m, b)
-}
-func (m *CompiledQuery_MergeJoinScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledQuery_MergeJoinScan.Marshal(b, m, deterministic)
-}
-func (dst *CompiledQuery_MergeJoinScan) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledQuery_MergeJoinScan.Merge(dst, src)
-}
-func (m *CompiledQuery_MergeJoinScan) XXX_Size() int {
- return xxx_messageInfo_CompiledQuery_MergeJoinScan.Size(m)
-}
-func (m *CompiledQuery_MergeJoinScan) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledQuery_MergeJoinScan.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledQuery_MergeJoinScan proto.InternalMessageInfo
-
-const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
-
-func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
- if m != nil && m.IndexName != nil {
- return *m.IndexName
- }
- return ""
-}
-
-func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
- if m != nil {
- return m.PrefixValue
- }
- return nil
-}
-
-func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
- if m != nil && m.ValuePrefix != nil {
- return *m.ValuePrefix
- }
- return Default_CompiledQuery_MergeJoinScan_ValuePrefix
-}
-
-type CompiledQuery_EntityFilter struct {
- Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
- Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
- Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} }
-func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
-func (*CompiledQuery_EntityFilter) ProtoMessage() {}
-func (*CompiledQuery_EntityFilter) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 2}
-}
-func (m *CompiledQuery_EntityFilter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledQuery_EntityFilter.Unmarshal(m, b)
-}
-func (m *CompiledQuery_EntityFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledQuery_EntityFilter.Marshal(b, m, deterministic)
-}
-func (dst *CompiledQuery_EntityFilter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledQuery_EntityFilter.Merge(dst, src)
-}
-func (m *CompiledQuery_EntityFilter) XXX_Size() int {
- return xxx_messageInfo_CompiledQuery_EntityFilter.Size(m)
-}
-func (m *CompiledQuery_EntityFilter) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledQuery_EntityFilter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledQuery_EntityFilter proto.InternalMessageInfo
-
-const Default_CompiledQuery_EntityFilter_Distinct bool = false
-
-func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
- if m != nil && m.Distinct != nil {
- return *m.Distinct
- }
- return Default_CompiledQuery_EntityFilter_Distinct
-}
-
-func (m *CompiledQuery_EntityFilter) GetKind() string {
- if m != nil && m.Kind != nil {
- return *m.Kind
- }
- return ""
-}
-
-func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
- if m != nil {
- return m.Ancestor
- }
- return nil
-}
-
-type CompiledCursor struct {
- Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position,json=position" json:"position,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledCursor) Reset() { *m = CompiledCursor{} }
-func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor) ProtoMessage() {}
-func (*CompiledCursor) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17}
-}
-func (m *CompiledCursor) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledCursor.Unmarshal(m, b)
-}
-func (m *CompiledCursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledCursor.Marshal(b, m, deterministic)
-}
-func (dst *CompiledCursor) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledCursor.Merge(dst, src)
-}
-func (m *CompiledCursor) XXX_Size() int {
- return xxx_messageInfo_CompiledCursor.Size(m)
-}
-func (m *CompiledCursor) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledCursor.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledCursor proto.InternalMessageInfo
-
-func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
- if m != nil {
- return m.Position
- }
- return nil
-}
-
-type CompiledCursor_Position struct {
- StartKey *string `protobuf:"bytes,27,opt,name=start_key,json=startKey" json:"start_key,omitempty"`
- Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue,json=indexvalue" json:"indexvalue,omitempty"`
- Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
- StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,json=startInclusive,def=1" json:"start_inclusive,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} }
-func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor_Position) ProtoMessage() {}
-func (*CompiledCursor_Position) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0}
-}
-func (m *CompiledCursor_Position) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledCursor_Position.Unmarshal(m, b)
-}
-func (m *CompiledCursor_Position) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledCursor_Position.Marshal(b, m, deterministic)
-}
-func (dst *CompiledCursor_Position) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledCursor_Position.Merge(dst, src)
-}
-func (m *CompiledCursor_Position) XXX_Size() int {
- return xxx_messageInfo_CompiledCursor_Position.Size(m)
-}
-func (m *CompiledCursor_Position) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledCursor_Position.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledCursor_Position proto.InternalMessageInfo
-
-const Default_CompiledCursor_Position_StartInclusive bool = true
-
-func (m *CompiledCursor_Position) GetStartKey() string {
- if m != nil && m.StartKey != nil {
- return *m.StartKey
- }
- return ""
-}
-
-func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
- if m != nil {
- return m.Indexvalue
- }
- return nil
-}
-
-func (m *CompiledCursor_Position) GetKey() *Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *CompiledCursor_Position) GetStartInclusive() bool {
- if m != nil && m.StartInclusive != nil {
- return *m.StartInclusive
- }
- return Default_CompiledCursor_Position_StartInclusive
-}
-
-type CompiledCursor_Position_IndexValue struct {
- Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
- Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} }
-func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
-func (*CompiledCursor_Position_IndexValue) ProtoMessage() {}
-func (*CompiledCursor_Position_IndexValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0, 0}
-}
-func (m *CompiledCursor_Position_IndexValue) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompiledCursor_Position_IndexValue.Unmarshal(m, b)
-}
-func (m *CompiledCursor_Position_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompiledCursor_Position_IndexValue.Marshal(b, m, deterministic)
-}
-func (dst *CompiledCursor_Position_IndexValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompiledCursor_Position_IndexValue.Merge(dst, src)
-}
-func (m *CompiledCursor_Position_IndexValue) XXX_Size() int {
- return xxx_messageInfo_CompiledCursor_Position_IndexValue.Size(m)
-}
-func (m *CompiledCursor_Position_IndexValue) XXX_DiscardUnknown() {
- xxx_messageInfo_CompiledCursor_Position_IndexValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompiledCursor_Position_IndexValue proto.InternalMessageInfo
-
-func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
- if m != nil && m.Property != nil {
- return *m.Property
- }
- return ""
-}
-
-func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type Cursor struct {
- Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
- App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Cursor) Reset() { *m = Cursor{} }
-func (m *Cursor) String() string { return proto.CompactTextString(m) }
-func (*Cursor) ProtoMessage() {}
-func (*Cursor) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{18}
-}
-func (m *Cursor) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Cursor.Unmarshal(m, b)
-}
-func (m *Cursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Cursor.Marshal(b, m, deterministic)
-}
-func (dst *Cursor) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Cursor.Merge(dst, src)
-}
-func (m *Cursor) XXX_Size() int {
- return xxx_messageInfo_Cursor.Size(m)
-}
-func (m *Cursor) XXX_DiscardUnknown() {
- xxx_messageInfo_Cursor.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Cursor proto.InternalMessageInfo
-
-func (m *Cursor) GetCursor() uint64 {
- if m != nil && m.Cursor != nil {
- return *m.Cursor
- }
- return 0
-}
-
-func (m *Cursor) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-type Error struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Error) Reset() { *m = Error{} }
-func (m *Error) String() string { return proto.CompactTextString(m) }
-func (*Error) ProtoMessage() {}
-func (*Error) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19}
-}
-func (m *Error) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Error.Unmarshal(m, b)
-}
-func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Error.Marshal(b, m, deterministic)
-}
-func (dst *Error) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Error.Merge(dst, src)
-}
-func (m *Error) XXX_Size() int {
- return xxx_messageInfo_Error.Size(m)
-}
-func (m *Error) XXX_DiscardUnknown() {
- xxx_messageInfo_Error.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Error proto.InternalMessageInfo
-
-type Cost struct {
- IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes,json=indexWrites" json:"index_writes,omitempty"`
- IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes,json=indexWriteBytes" json:"index_write_bytes,omitempty"`
- EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes,json=entityWrites" json:"entity_writes,omitempty"`
- EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes,json=entityWriteBytes" json:"entity_write_bytes,omitempty"`
- Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost,json=commitcost" json:"commitcost,omitempty"`
- ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta,json=approximateStorageDelta" json:"approximate_storage_delta,omitempty"`
- IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates,json=idSequenceUpdates" json:"id_sequence_updates,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Cost) Reset() { *m = Cost{} }
-func (m *Cost) String() string { return proto.CompactTextString(m) }
-func (*Cost) ProtoMessage() {}
-func (*Cost) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20}
-}
-func (m *Cost) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Cost.Unmarshal(m, b)
-}
-func (m *Cost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Cost.Marshal(b, m, deterministic)
-}
-func (dst *Cost) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Cost.Merge(dst, src)
-}
-func (m *Cost) XXX_Size() int {
- return xxx_messageInfo_Cost.Size(m)
-}
-func (m *Cost) XXX_DiscardUnknown() {
- xxx_messageInfo_Cost.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Cost proto.InternalMessageInfo
-
-func (m *Cost) GetIndexWrites() int32 {
- if m != nil && m.IndexWrites != nil {
- return *m.IndexWrites
- }
- return 0
-}
-
-func (m *Cost) GetIndexWriteBytes() int32 {
- if m != nil && m.IndexWriteBytes != nil {
- return *m.IndexWriteBytes
- }
- return 0
-}
-
-func (m *Cost) GetEntityWrites() int32 {
- if m != nil && m.EntityWrites != nil {
- return *m.EntityWrites
- }
- return 0
-}
-
-func (m *Cost) GetEntityWriteBytes() int32 {
- if m != nil && m.EntityWriteBytes != nil {
- return *m.EntityWriteBytes
- }
- return 0
-}
-
-func (m *Cost) GetCommitcost() *Cost_CommitCost {
- if m != nil {
- return m.Commitcost
- }
- return nil
-}
-
-func (m *Cost) GetApproximateStorageDelta() int32 {
- if m != nil && m.ApproximateStorageDelta != nil {
- return *m.ApproximateStorageDelta
- }
- return 0
-}
-
-func (m *Cost) GetIdSequenceUpdates() int32 {
- if m != nil && m.IdSequenceUpdates != nil {
- return *m.IdSequenceUpdates
- }
- return 0
-}
-
-type Cost_CommitCost struct {
- RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts,json=requestedEntityPuts" json:"requested_entity_puts,omitempty"`
- RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes,json=requestedEntityDeletes" json:"requested_entity_deletes,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} }
-func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
-func (*Cost_CommitCost) ProtoMessage() {}
-func (*Cost_CommitCost) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20, 0}
-}
-func (m *Cost_CommitCost) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Cost_CommitCost.Unmarshal(m, b)
-}
-func (m *Cost_CommitCost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Cost_CommitCost.Marshal(b, m, deterministic)
-}
-func (dst *Cost_CommitCost) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Cost_CommitCost.Merge(dst, src)
-}
-func (m *Cost_CommitCost) XXX_Size() int {
- return xxx_messageInfo_Cost_CommitCost.Size(m)
-}
-func (m *Cost_CommitCost) XXX_DiscardUnknown() {
- xxx_messageInfo_Cost_CommitCost.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Cost_CommitCost proto.InternalMessageInfo
-
-func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
- if m != nil && m.RequestedEntityPuts != nil {
- return *m.RequestedEntityPuts
- }
- return 0
-}
-
-func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
- if m != nil && m.RequestedEntityDeletes != nil {
- return *m.RequestedEntityDeletes
- }
- return 0
-}
-
-type GetRequest struct {
- Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
- Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
- Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
- FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"`
- Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
- AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,json=allowDeferred,def=0" json:"allow_deferred,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetRequest) Reset() { *m = GetRequest{} }
-func (m *GetRequest) String() string { return proto.CompactTextString(m) }
-func (*GetRequest) ProtoMessage() {}
-func (*GetRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{21}
-}
-func (m *GetRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetRequest.Unmarshal(m, b)
-}
-func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic)
-}
-func (dst *GetRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetRequest.Merge(dst, src)
-}
-func (m *GetRequest) XXX_Size() int {
- return xxx_messageInfo_GetRequest.Size(m)
-}
-func (m *GetRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_GetRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetRequest proto.InternalMessageInfo
-
-const Default_GetRequest_AllowDeferred bool = false
-
-func (m *GetRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *GetRequest) GetKey() []*Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *GetRequest) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *GetRequest) GetFailoverMs() int64 {
- if m != nil && m.FailoverMs != nil {
- return *m.FailoverMs
- }
- return 0
-}
-
-func (m *GetRequest) GetStrong() bool {
- if m != nil && m.Strong != nil {
- return *m.Strong
- }
- return false
-}
-
-func (m *GetRequest) GetAllowDeferred() bool {
- if m != nil && m.AllowDeferred != nil {
- return *m.AllowDeferred
- }
- return Default_GetRequest_AllowDeferred
-}
-
-type GetResponse struct {
- Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity,json=entity" json:"entity,omitempty"`
- Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
- InOrder *bool `protobuf:"varint,6,opt,name=in_order,json=inOrder,def=1" json:"in_order,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetResponse) Reset() { *m = GetResponse{} }
-func (m *GetResponse) String() string { return proto.CompactTextString(m) }
-func (*GetResponse) ProtoMessage() {}
-func (*GetResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22}
-}
-func (m *GetResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetResponse.Unmarshal(m, b)
-}
-func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic)
-}
-func (dst *GetResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetResponse.Merge(dst, src)
-}
-func (m *GetResponse) XXX_Size() int {
- return xxx_messageInfo_GetResponse.Size(m)
-}
-func (m *GetResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_GetResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetResponse proto.InternalMessageInfo
-
-const Default_GetResponse_InOrder bool = true
-
-func (m *GetResponse) GetEntity() []*GetResponse_Entity {
- if m != nil {
- return m.Entity
- }
- return nil
-}
-
-func (m *GetResponse) GetDeferred() []*Reference {
- if m != nil {
- return m.Deferred
- }
- return nil
-}
-
-func (m *GetResponse) GetInOrder() bool {
- if m != nil && m.InOrder != nil {
- return *m.InOrder
- }
- return Default_GetResponse_InOrder
-}
-
-type GetResponse_Entity struct {
- Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
- Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
- Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} }
-func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
-func (*GetResponse_Entity) ProtoMessage() {}
-func (*GetResponse_Entity) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22, 0}
-}
-func (m *GetResponse_Entity) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GetResponse_Entity.Unmarshal(m, b)
-}
-func (m *GetResponse_Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GetResponse_Entity.Marshal(b, m, deterministic)
-}
-func (dst *GetResponse_Entity) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GetResponse_Entity.Merge(dst, src)
-}
-func (m *GetResponse_Entity) XXX_Size() int {
- return xxx_messageInfo_GetResponse_Entity.Size(m)
-}
-func (m *GetResponse_Entity) XXX_DiscardUnknown() {
- xxx_messageInfo_GetResponse_Entity.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GetResponse_Entity proto.InternalMessageInfo
-
-func (m *GetResponse_Entity) GetEntity() *EntityProto {
- if m != nil {
- return m.Entity
- }
- return nil
-}
-
-func (m *GetResponse_Entity) GetKey() *Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *GetResponse_Entity) GetVersion() int64 {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return 0
-}
-
-type PutRequest struct {
- Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
- Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
- Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
- CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
- Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
- Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
- MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
- Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
- AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,json=autoIdPolicy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PutRequest) Reset() { *m = PutRequest{} }
-func (m *PutRequest) String() string { return proto.CompactTextString(m) }
-func (*PutRequest) ProtoMessage() {}
-func (*PutRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23}
-}
-func (m *PutRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PutRequest.Unmarshal(m, b)
-}
-func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
-}
-func (dst *PutRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PutRequest.Merge(dst, src)
-}
-func (m *PutRequest) XXX_Size() int {
- return xxx_messageInfo_PutRequest.Size(m)
-}
-func (m *PutRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_PutRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PutRequest proto.InternalMessageInfo
-
-const Default_PutRequest_Trusted bool = false
-const Default_PutRequest_Force bool = false
-const Default_PutRequest_MarkChanges bool = false
-const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT
-
-func (m *PutRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *PutRequest) GetEntity() []*EntityProto {
- if m != nil {
- return m.Entity
- }
- return nil
-}
-
-func (m *PutRequest) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
- if m != nil {
- return m.CompositeIndex
- }
- return nil
-}
-
-func (m *PutRequest) GetTrusted() bool {
- if m != nil && m.Trusted != nil {
- return *m.Trusted
- }
- return Default_PutRequest_Trusted
-}
-
-func (m *PutRequest) GetForce() bool {
- if m != nil && m.Force != nil {
- return *m.Force
- }
- return Default_PutRequest_Force
-}
-
-func (m *PutRequest) GetMarkChanges() bool {
- if m != nil && m.MarkChanges != nil {
- return *m.MarkChanges
- }
- return Default_PutRequest_MarkChanges
-}
-
-func (m *PutRequest) GetSnapshot() []*Snapshot {
- if m != nil {
- return m.Snapshot
- }
- return nil
-}
-
-func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
- if m != nil && m.AutoIdPolicy != nil {
- return *m.AutoIdPolicy
- }
- return Default_PutRequest_AutoIdPolicy
-}
-
-type PutResponse struct {
- Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
- Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
- Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PutResponse) Reset() { *m = PutResponse{} }
-func (m *PutResponse) String() string { return proto.CompactTextString(m) }
-func (*PutResponse) ProtoMessage() {}
-func (*PutResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{24}
-}
-func (m *PutResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_PutResponse.Unmarshal(m, b)
-}
-func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
-}
-func (dst *PutResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PutResponse.Merge(dst, src)
-}
-func (m *PutResponse) XXX_Size() int {
- return xxx_messageInfo_PutResponse.Size(m)
-}
-func (m *PutResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_PutResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PutResponse proto.InternalMessageInfo
-
-func (m *PutResponse) GetKey() []*Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *PutResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-func (m *PutResponse) GetVersion() []int64 {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type TouchRequest struct {
- Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
- Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
- CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"`
- Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
- Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TouchRequest) Reset() { *m = TouchRequest{} }
-func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
-func (*TouchRequest) ProtoMessage() {}
-func (*TouchRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{25}
-}
-func (m *TouchRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TouchRequest.Unmarshal(m, b)
-}
-func (m *TouchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TouchRequest.Marshal(b, m, deterministic)
-}
-func (dst *TouchRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TouchRequest.Merge(dst, src)
-}
-func (m *TouchRequest) XXX_Size() int {
- return xxx_messageInfo_TouchRequest.Size(m)
-}
-func (m *TouchRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_TouchRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TouchRequest proto.InternalMessageInfo
-
-const Default_TouchRequest_Force bool = false
-
-func (m *TouchRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *TouchRequest) GetKey() []*Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
- if m != nil {
- return m.CompositeIndex
- }
- return nil
-}
-
-func (m *TouchRequest) GetForce() bool {
- if m != nil && m.Force != nil {
- return *m.Force
- }
- return Default_TouchRequest_Force
-}
-
-func (m *TouchRequest) GetSnapshot() []*Snapshot {
- if m != nil {
- return m.Snapshot
- }
- return nil
-}
-
-type TouchResponse struct {
- Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TouchResponse) Reset() { *m = TouchResponse{} }
-func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
-func (*TouchResponse) ProtoMessage() {}
-func (*TouchResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{26}
-}
-func (m *TouchResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_TouchResponse.Unmarshal(m, b)
-}
-func (m *TouchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_TouchResponse.Marshal(b, m, deterministic)
-}
-func (dst *TouchResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TouchResponse.Merge(dst, src)
-}
-func (m *TouchResponse) XXX_Size() int {
- return xxx_messageInfo_TouchResponse.Size(m)
-}
-func (m *TouchResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_TouchResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TouchResponse proto.InternalMessageInfo
-
-func (m *TouchResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-type DeleteRequest struct {
- Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
- Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
- Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
- Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
- Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
- MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"`
- Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
-func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteRequest) ProtoMessage() {}
-func (*DeleteRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{27}
-}
-func (m *DeleteRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DeleteRequest.Unmarshal(m, b)
-}
-func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic)
-}
-func (dst *DeleteRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeleteRequest.Merge(dst, src)
-}
-func (m *DeleteRequest) XXX_Size() int {
- return xxx_messageInfo_DeleteRequest.Size(m)
-}
-func (m *DeleteRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DeleteRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo
-
-const Default_DeleteRequest_Trusted bool = false
-const Default_DeleteRequest_Force bool = false
-const Default_DeleteRequest_MarkChanges bool = false
-
-func (m *DeleteRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *DeleteRequest) GetKey() []*Reference {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *DeleteRequest) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *DeleteRequest) GetTrusted() bool {
- if m != nil && m.Trusted != nil {
- return *m.Trusted
- }
- return Default_DeleteRequest_Trusted
-}
-
-func (m *DeleteRequest) GetForce() bool {
- if m != nil && m.Force != nil {
- return *m.Force
- }
- return Default_DeleteRequest_Force
-}
-
-func (m *DeleteRequest) GetMarkChanges() bool {
- if m != nil && m.MarkChanges != nil {
- return *m.MarkChanges
- }
- return Default_DeleteRequest_MarkChanges
-}
-
-func (m *DeleteRequest) GetSnapshot() []*Snapshot {
- if m != nil {
- return m.Snapshot
- }
- return nil
-}
-
-type DeleteResponse struct {
- Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
- Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
-func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteResponse) ProtoMessage() {}
-func (*DeleteResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{28}
-}
-func (m *DeleteResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_DeleteResponse.Unmarshal(m, b)
-}
-func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic)
-}
-func (dst *DeleteResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeleteResponse.Merge(dst, src)
-}
-func (m *DeleteResponse) XXX_Size() int {
- return xxx_messageInfo_DeleteResponse.Size(m)
-}
-func (m *DeleteResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_DeleteResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo
-
-func (m *DeleteResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-func (m *DeleteResponse) GetVersion() []int64 {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type NextRequest struct {
- Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
- Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
- Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
- Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
- Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *NextRequest) Reset() { *m = NextRequest{} }
-func (m *NextRequest) String() string { return proto.CompactTextString(m) }
-func (*NextRequest) ProtoMessage() {}
-func (*NextRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{29}
-}
-func (m *NextRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_NextRequest.Unmarshal(m, b)
-}
-func (m *NextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_NextRequest.Marshal(b, m, deterministic)
-}
-func (dst *NextRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_NextRequest.Merge(dst, src)
-}
-func (m *NextRequest) XXX_Size() int {
- return xxx_messageInfo_NextRequest.Size(m)
-}
-func (m *NextRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_NextRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_NextRequest proto.InternalMessageInfo
-
-const Default_NextRequest_Offset int32 = 0
-const Default_NextRequest_Compile bool = false
-
-func (m *NextRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *NextRequest) GetCursor() *Cursor {
- if m != nil {
- return m.Cursor
- }
- return nil
-}
-
-func (m *NextRequest) GetCount() int32 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *NextRequest) GetOffset() int32 {
- if m != nil && m.Offset != nil {
- return *m.Offset
- }
- return Default_NextRequest_Offset
-}
-
-func (m *NextRequest) GetCompile() bool {
- if m != nil && m.Compile != nil {
- return *m.Compile
- }
- return Default_NextRequest_Compile
-}
-
-type QueryResult struct {
- Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
- Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
- SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"`
- MoreResults *bool `protobuf:"varint,3,req,name=more_results,json=moreResults" json:"more_results,omitempty"`
- KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"`
- IndexOnly *bool `protobuf:"varint,9,opt,name=index_only,json=indexOnly" json:"index_only,omitempty"`
- SmallOps *bool `protobuf:"varint,10,opt,name=small_ops,json=smallOps" json:"small_ops,omitempty"`
- CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query,json=compiledQuery" json:"compiled_query,omitempty"`
- CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"`
- Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
- Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *QueryResult) Reset() { *m = QueryResult{} }
-func (m *QueryResult) String() string { return proto.CompactTextString(m) }
-func (*QueryResult) ProtoMessage() {}
-func (*QueryResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{30}
-}
-func (m *QueryResult) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_QueryResult.Unmarshal(m, b)
-}
-func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic)
-}
-func (dst *QueryResult) XXX_Merge(src proto.Message) {
- xxx_messageInfo_QueryResult.Merge(dst, src)
-}
-func (m *QueryResult) XXX_Size() int {
- return xxx_messageInfo_QueryResult.Size(m)
-}
-func (m *QueryResult) XXX_DiscardUnknown() {
- xxx_messageInfo_QueryResult.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_QueryResult proto.InternalMessageInfo
-
-func (m *QueryResult) GetCursor() *Cursor {
- if m != nil {
- return m.Cursor
- }
- return nil
-}
-
-func (m *QueryResult) GetResult() []*EntityProto {
- if m != nil {
- return m.Result
- }
- return nil
-}
-
-func (m *QueryResult) GetSkippedResults() int32 {
- if m != nil && m.SkippedResults != nil {
- return *m.SkippedResults
- }
- return 0
-}
-
-func (m *QueryResult) GetMoreResults() bool {
- if m != nil && m.MoreResults != nil {
- return *m.MoreResults
- }
- return false
-}
-
-func (m *QueryResult) GetKeysOnly() bool {
- if m != nil && m.KeysOnly != nil {
- return *m.KeysOnly
- }
- return false
-}
-
-func (m *QueryResult) GetIndexOnly() bool {
- if m != nil && m.IndexOnly != nil {
- return *m.IndexOnly
- }
- return false
-}
-
-func (m *QueryResult) GetSmallOps() bool {
- if m != nil && m.SmallOps != nil {
- return *m.SmallOps
- }
- return false
-}
-
-func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
- if m != nil {
- return m.CompiledQuery
- }
- return nil
-}
-
-func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
- if m != nil {
- return m.CompiledCursor
- }
- return nil
-}
-
-func (m *QueryResult) GetIndex() []*CompositeIndex {
- if m != nil {
- return m.Index
- }
- return nil
-}
-
-func (m *QueryResult) GetVersion() []int64 {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type AllocateIdsRequest struct {
- Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
- ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key,json=modelKey" json:"model_key,omitempty"`
- Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
- Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
- Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} }
-func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
-func (*AllocateIdsRequest) ProtoMessage() {}
-func (*AllocateIdsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{31}
-}
-func (m *AllocateIdsRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_AllocateIdsRequest.Unmarshal(m, b)
-}
-func (m *AllocateIdsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_AllocateIdsRequest.Marshal(b, m, deterministic)
-}
-func (dst *AllocateIdsRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AllocateIdsRequest.Merge(dst, src)
-}
-func (m *AllocateIdsRequest) XXX_Size() int {
- return xxx_messageInfo_AllocateIdsRequest.Size(m)
-}
-func (m *AllocateIdsRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AllocateIdsRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AllocateIdsRequest proto.InternalMessageInfo
-
-func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AllocateIdsRequest) GetModelKey() *Reference {
- if m != nil {
- return m.ModelKey
- }
- return nil
-}
-
-func (m *AllocateIdsRequest) GetSize() int64 {
- if m != nil && m.Size != nil {
- return *m.Size
- }
- return 0
-}
-
-func (m *AllocateIdsRequest) GetMax() int64 {
- if m != nil && m.Max != nil {
- return *m.Max
- }
- return 0
-}
-
-func (m *AllocateIdsRequest) GetReserve() []*Reference {
- if m != nil {
- return m.Reserve
- }
- return nil
-}
-
-type AllocateIdsResponse struct {
- Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
- End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
- Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} }
-func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
-func (*AllocateIdsResponse) ProtoMessage() {}
-func (*AllocateIdsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{32}
-}
-func (m *AllocateIdsResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_AllocateIdsResponse.Unmarshal(m, b)
-}
-func (m *AllocateIdsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_AllocateIdsResponse.Marshal(b, m, deterministic)
-}
-func (dst *AllocateIdsResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AllocateIdsResponse.Merge(dst, src)
-}
-func (m *AllocateIdsResponse) XXX_Size() int {
- return xxx_messageInfo_AllocateIdsResponse.Size(m)
-}
-func (m *AllocateIdsResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AllocateIdsResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AllocateIdsResponse proto.InternalMessageInfo
-
-func (m *AllocateIdsResponse) GetStart() int64 {
- if m != nil && m.Start != nil {
- return *m.Start
- }
- return 0
-}
-
-func (m *AllocateIdsResponse) GetEnd() int64 {
- if m != nil && m.End != nil {
- return *m.End
- }
- return 0
-}
-
-func (m *AllocateIdsResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-type CompositeIndices struct {
- Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompositeIndices) Reset() { *m = CompositeIndices{} }
-func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
-func (*CompositeIndices) ProtoMessage() {}
-func (*CompositeIndices) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{33}
-}
-func (m *CompositeIndices) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CompositeIndices.Unmarshal(m, b)
-}
-func (m *CompositeIndices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CompositeIndices.Marshal(b, m, deterministic)
-}
-func (dst *CompositeIndices) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompositeIndices.Merge(dst, src)
-}
-func (m *CompositeIndices) XXX_Size() int {
- return xxx_messageInfo_CompositeIndices.Size(m)
-}
-func (m *CompositeIndices) XXX_DiscardUnknown() {
- xxx_messageInfo_CompositeIndices.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompositeIndices proto.InternalMessageInfo
-
-func (m *CompositeIndices) GetIndex() []*CompositeIndex {
- if m != nil {
- return m.Index
- }
- return nil
-}
-
-type AddActionsRequest struct {
- Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
- Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
- Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} }
-func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
-func (*AddActionsRequest) ProtoMessage() {}
-func (*AddActionsRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{34}
-}
-func (m *AddActionsRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_AddActionsRequest.Unmarshal(m, b)
-}
-func (m *AddActionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_AddActionsRequest.Marshal(b, m, deterministic)
-}
-func (dst *AddActionsRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AddActionsRequest.Merge(dst, src)
-}
-func (m *AddActionsRequest) XXX_Size() int {
- return xxx_messageInfo_AddActionsRequest.Size(m)
-}
-func (m *AddActionsRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AddActionsRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AddActionsRequest proto.InternalMessageInfo
-
-func (m *AddActionsRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AddActionsRequest) GetTransaction() *Transaction {
- if m != nil {
- return m.Transaction
- }
- return nil
-}
-
-func (m *AddActionsRequest) GetAction() []*Action {
- if m != nil {
- return m.Action
- }
- return nil
-}
-
-type AddActionsResponse struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} }
-func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
-func (*AddActionsResponse) ProtoMessage() {}
-func (*AddActionsResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{35}
-}
-func (m *AddActionsResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_AddActionsResponse.Unmarshal(m, b)
-}
-func (m *AddActionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_AddActionsResponse.Marshal(b, m, deterministic)
-}
-func (dst *AddActionsResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AddActionsResponse.Merge(dst, src)
-}
-func (m *AddActionsResponse) XXX_Size() int {
- return xxx_messageInfo_AddActionsResponse.Size(m)
-}
-func (m *AddActionsResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AddActionsResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AddActionsResponse proto.InternalMessageInfo
-
-type BeginTransactionRequest struct {
- Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
- App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
- AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,json=allowMultipleEg,def=0" json:"allow_multiple_eg,omitempty"`
- DatabaseId *string `protobuf:"bytes,4,opt,name=database_id,json=databaseId" json:"database_id,omitempty"`
- Mode *BeginTransactionRequest_TransactionMode `protobuf:"varint,5,opt,name=mode,enum=appengine.BeginTransactionRequest_TransactionMode,def=0" json:"mode,omitempty"`
- PreviousTransaction *Transaction `protobuf:"bytes,7,opt,name=previous_transaction,json=previousTransaction" json:"previous_transaction,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} }
-func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
-func (*BeginTransactionRequest) ProtoMessage() {}
-func (*BeginTransactionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36}
-}
-func (m *BeginTransactionRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BeginTransactionRequest.Unmarshal(m, b)
-}
-func (m *BeginTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BeginTransactionRequest.Marshal(b, m, deterministic)
-}
-func (dst *BeginTransactionRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BeginTransactionRequest.Merge(dst, src)
-}
-func (m *BeginTransactionRequest) XXX_Size() int {
- return xxx_messageInfo_BeginTransactionRequest.Size(m)
-}
-func (m *BeginTransactionRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_BeginTransactionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_BeginTransactionRequest proto.InternalMessageInfo
-
-const Default_BeginTransactionRequest_AllowMultipleEg bool = false
-const Default_BeginTransactionRequest_Mode BeginTransactionRequest_TransactionMode = BeginTransactionRequest_UNKNOWN
-
-func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *BeginTransactionRequest) GetApp() string {
- if m != nil && m.App != nil {
- return *m.App
- }
- return ""
-}
-
-func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
- if m != nil && m.AllowMultipleEg != nil {
- return *m.AllowMultipleEg
- }
- return Default_BeginTransactionRequest_AllowMultipleEg
-}
-
-func (m *BeginTransactionRequest) GetDatabaseId() string {
- if m != nil && m.DatabaseId != nil {
- return *m.DatabaseId
- }
- return ""
-}
-
-func (m *BeginTransactionRequest) GetMode() BeginTransactionRequest_TransactionMode {
- if m != nil && m.Mode != nil {
- return *m.Mode
- }
- return Default_BeginTransactionRequest_Mode
-}
-
-func (m *BeginTransactionRequest) GetPreviousTransaction() *Transaction {
- if m != nil {
- return m.PreviousTransaction
- }
- return nil
-}
-
-type CommitResponse struct {
- Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
- Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version,json=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CommitResponse) Reset() { *m = CommitResponse{} }
-func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
-func (*CommitResponse) ProtoMessage() {}
-func (*CommitResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37}
-}
-func (m *CommitResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CommitResponse.Unmarshal(m, b)
-}
-func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic)
-}
-func (dst *CommitResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CommitResponse.Merge(dst, src)
-}
-func (m *CommitResponse) XXX_Size() int {
- return xxx_messageInfo_CommitResponse.Size(m)
-}
-func (m *CommitResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_CommitResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CommitResponse proto.InternalMessageInfo
-
-func (m *CommitResponse) GetCost() *Cost {
- if m != nil {
- return m.Cost
- }
- return nil
-}
-
-func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
- if m != nil {
- return m.Version
- }
- return nil
-}
-
-type CommitResponse_Version struct {
- RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key,json=rootEntityKey" json:"root_entity_key,omitempty"`
- Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} }
-func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
-func (*CommitResponse_Version) ProtoMessage() {}
-func (*CommitResponse_Version) Descriptor() ([]byte, []int) {
- return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37, 0}
-}
-func (m *CommitResponse_Version) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_CommitResponse_Version.Unmarshal(m, b)
-}
-func (m *CommitResponse_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_CommitResponse_Version.Marshal(b, m, deterministic)
-}
-func (dst *CommitResponse_Version) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CommitResponse_Version.Merge(dst, src)
-}
-func (m *CommitResponse_Version) XXX_Size() int {
- return xxx_messageInfo_CommitResponse_Version.Size(m)
-}
-func (m *CommitResponse_Version) XXX_DiscardUnknown() {
- xxx_messageInfo_CommitResponse_Version.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CommitResponse_Version proto.InternalMessageInfo
-
-func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
- if m != nil {
- return m.RootEntityKey
- }
- return nil
-}
-
-func (m *CommitResponse_Version) GetVersion() int64 {
- if m != nil && m.Version != nil {
- return *m.Version
- }
- return 0
-}
-
-func init() {
- proto.RegisterType((*Action)(nil), "appengine.Action")
- proto.RegisterType((*PropertyValue)(nil), "appengine.PropertyValue")
- proto.RegisterType((*PropertyValue_PointValue)(nil), "appengine.PropertyValue.PointValue")
- proto.RegisterType((*PropertyValue_UserValue)(nil), "appengine.PropertyValue.UserValue")
- proto.RegisterType((*PropertyValue_ReferenceValue)(nil), "appengine.PropertyValue.ReferenceValue")
- proto.RegisterType((*PropertyValue_ReferenceValue_PathElement)(nil), "appengine.PropertyValue.ReferenceValue.PathElement")
- proto.RegisterType((*Property)(nil), "appengine.Property")
- proto.RegisterType((*Path)(nil), "appengine.Path")
- proto.RegisterType((*Path_Element)(nil), "appengine.Path.Element")
- proto.RegisterType((*Reference)(nil), "appengine.Reference")
- proto.RegisterType((*User)(nil), "appengine.User")
- proto.RegisterType((*EntityProto)(nil), "appengine.EntityProto")
- proto.RegisterType((*CompositeProperty)(nil), "appengine.CompositeProperty")
- proto.RegisterType((*Index)(nil), "appengine.Index")
- proto.RegisterType((*Index_Property)(nil), "appengine.Index.Property")
- proto.RegisterType((*CompositeIndex)(nil), "appengine.CompositeIndex")
- proto.RegisterType((*IndexPostfix)(nil), "appengine.IndexPostfix")
- proto.RegisterType((*IndexPostfix_IndexValue)(nil), "appengine.IndexPostfix.IndexValue")
- proto.RegisterType((*IndexPosition)(nil), "appengine.IndexPosition")
- proto.RegisterType((*Snapshot)(nil), "appengine.Snapshot")
- proto.RegisterType((*InternalHeader)(nil), "appengine.InternalHeader")
- proto.RegisterType((*Transaction)(nil), "appengine.Transaction")
- proto.RegisterType((*Query)(nil), "appengine.Query")
- proto.RegisterType((*Query_Filter)(nil), "appengine.Query.Filter")
- proto.RegisterType((*Query_Order)(nil), "appengine.Query.Order")
- proto.RegisterType((*CompiledQuery)(nil), "appengine.CompiledQuery")
- proto.RegisterType((*CompiledQuery_PrimaryScan)(nil), "appengine.CompiledQuery.PrimaryScan")
- proto.RegisterType((*CompiledQuery_MergeJoinScan)(nil), "appengine.CompiledQuery.MergeJoinScan")
- proto.RegisterType((*CompiledQuery_EntityFilter)(nil), "appengine.CompiledQuery.EntityFilter")
- proto.RegisterType((*CompiledCursor)(nil), "appengine.CompiledCursor")
- proto.RegisterType((*CompiledCursor_Position)(nil), "appengine.CompiledCursor.Position")
- proto.RegisterType((*CompiledCursor_Position_IndexValue)(nil), "appengine.CompiledCursor.Position.IndexValue")
- proto.RegisterType((*Cursor)(nil), "appengine.Cursor")
- proto.RegisterType((*Error)(nil), "appengine.Error")
- proto.RegisterType((*Cost)(nil), "appengine.Cost")
- proto.RegisterType((*Cost_CommitCost)(nil), "appengine.Cost.CommitCost")
- proto.RegisterType((*GetRequest)(nil), "appengine.GetRequest")
- proto.RegisterType((*GetResponse)(nil), "appengine.GetResponse")
- proto.RegisterType((*GetResponse_Entity)(nil), "appengine.GetResponse.Entity")
- proto.RegisterType((*PutRequest)(nil), "appengine.PutRequest")
- proto.RegisterType((*PutResponse)(nil), "appengine.PutResponse")
- proto.RegisterType((*TouchRequest)(nil), "appengine.TouchRequest")
- proto.RegisterType((*TouchResponse)(nil), "appengine.TouchResponse")
- proto.RegisterType((*DeleteRequest)(nil), "appengine.DeleteRequest")
- proto.RegisterType((*DeleteResponse)(nil), "appengine.DeleteResponse")
- proto.RegisterType((*NextRequest)(nil), "appengine.NextRequest")
- proto.RegisterType((*QueryResult)(nil), "appengine.QueryResult")
- proto.RegisterType((*AllocateIdsRequest)(nil), "appengine.AllocateIdsRequest")
- proto.RegisterType((*AllocateIdsResponse)(nil), "appengine.AllocateIdsResponse")
- proto.RegisterType((*CompositeIndices)(nil), "appengine.CompositeIndices")
- proto.RegisterType((*AddActionsRequest)(nil), "appengine.AddActionsRequest")
- proto.RegisterType((*AddActionsResponse)(nil), "appengine.AddActionsResponse")
- proto.RegisterType((*BeginTransactionRequest)(nil), "appengine.BeginTransactionRequest")
- proto.RegisterType((*CommitResponse)(nil), "appengine.CommitResponse")
- proto.RegisterType((*CommitResponse_Version)(nil), "appengine.CommitResponse.Version")
-}
-
-func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/datastore/datastore_v3.proto", fileDescriptor_datastore_v3_83b17b80c34f6179)
-}
-
-var fileDescriptor_datastore_v3_83b17b80c34f6179 = []byte{
- // 4156 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0xe3, 0x46,
- 0x76, 0x37, 0xc1, 0xef, 0x47, 0x89, 0x82, 0x5a, 0xf3, 0xc1, 0xa1, 0x3f, 0x46, 0xc6, 0xac, 0x6d,
- 0xd9, 0x6b, 0x73, 0x6c, 0xf9, 0x23, 0x5b, 0x4a, 0x76, 0x1d, 0x4a, 0xc4, 0x68, 0x90, 0xa1, 0x48,
- 0xb9, 0x09, 0xd9, 0x9e, 0x5c, 0x50, 0x18, 0xa2, 0x29, 0x21, 0x43, 0x02, 0x30, 0x00, 0x6a, 0x46,
- 0x93, 0xe4, 0x90, 0x4b, 0x2a, 0x55, 0x5b, 0xa9, 0x1c, 0x92, 0x4a, 0x25, 0xf9, 0x07, 0x72, 0xc8,
- 0x39, 0x95, 0xaa, 0x54, 0xf6, 0x98, 0x5b, 0x0e, 0x7b, 0xc9, 0x31, 0x95, 0x73, 0xf2, 0x27, 0x24,
- 0x39, 0xa4, 0xfa, 0x75, 0x03, 0x02, 0x28, 0x4a, 0x23, 0x6d, 0xf6, 0x90, 0x13, 0xd1, 0xef, 0xfd,
- 0xba, 0xf1, 0xfa, 0xf5, 0xfb, 0x6c, 0x10, 0xba, 0xc7, 0xbe, 0x7f, 0x3c, 0x65, 0x9d, 0x63, 0x7f,
- 0x6a, 0x7b, 0xc7, 0x1d, 0x3f, 0x3c, 0x7e, 0x68, 0x07, 0x01, 0xf3, 0x8e, 0x5d, 0x8f, 0x3d, 0x74,
- 0xbd, 0x98, 0x85, 0x9e, 0x3d, 0x7d, 0xe8, 0xd8, 0xb1, 0x1d, 0xc5, 0x7e, 0xc8, 0xce, 0x9f, 0xac,
- 0xd3, 0xcf, 0x3b, 0x41, 0xe8, 0xc7, 0x3e, 0xa9, 0xa7, 0x13, 0xb4, 0x1a, 0x54, 0xba, 0xe3, 0xd8,
- 0xf5, 0x3d, 0xed, 0x1f, 0x2b, 0xb0, 0x7a, 0x18, 0xfa, 0x01, 0x0b, 0xe3, 0xb3, 0x6f, 0xed, 0xe9,
- 0x9c, 0x91, 0x77, 0x00, 0x5c, 0x2f, 0xfe, 0xea, 0x0b, 0x1c, 0xb5, 0x0a, 0x9b, 0x85, 0xad, 0x22,
- 0xcd, 0x50, 0x88, 0x06, 0x2b, 0xcf, 0x7c, 0x7f, 0xca, 0x6c, 0x4f, 0x20, 0x94, 0xcd, 0xc2, 0x56,
- 0x8d, 0xe6, 0x68, 0x64, 0x13, 0x1a, 0x51, 0x1c, 0xba, 0xde, 0xb1, 0x80, 0x14, 0x37, 0x0b, 0x5b,
- 0x75, 0x9a, 0x25, 0x71, 0x84, 0xe3, 0xcf, 0x9f, 0x4d, 0x99, 0x40, 0x94, 0x36, 0x0b, 0x5b, 0x05,
- 0x9a, 0x25, 0x91, 0x3d, 0x80, 0xc0, 0x77, 0xbd, 0xf8, 0x14, 0x01, 0xe5, 0xcd, 0xc2, 0x16, 0x6c,
- 0x3f, 0xe8, 0xa4, 0x7b, 0xe8, 0xe4, 0xa4, 0xee, 0x1c, 0x72, 0x28, 0x3e, 0xd2, 0xcc, 0x34, 0xf2,
- 0xdb, 0x50, 0x9f, 0x47, 0x2c, 0x14, 0x6b, 0xd4, 0x70, 0x0d, 0xed, 0xd2, 0x35, 0x8e, 0x22, 0x16,
- 0x8a, 0x25, 0xce, 0x27, 0x91, 0x21, 0x34, 0x43, 0x36, 0x61, 0x21, 0xf3, 0xc6, 0x4c, 0x2c, 0xb3,
- 0x82, 0xcb, 0x7c, 0x70, 0xe9, 0x32, 0x34, 0x81, 0x8b, 0xb5, 0x16, 0xa6, 0xb7, 0xb7, 0x00, 0xce,
- 0x85, 0x25, 0x2b, 0x50, 0x78, 0xd9, 0xaa, 0x6c, 0x2a, 0x5b, 0x05, 0x5a, 0x78, 0xc9, 0x47, 0x67,
- 0xad, 0xaa, 0x18, 0x9d, 0xb5, 0xff, 0xa9, 0x00, 0xf5, 0x54, 0x26, 0x72, 0x0b, 0xca, 0x6c, 0x66,
- 0xbb, 0xd3, 0x56, 0x7d, 0x53, 0xd9, 0xaa, 0x53, 0x31, 0x20, 0xf7, 0xa1, 0x61, 0xcf, 0xe3, 0x13,
- 0xcb, 0xf1, 0x67, 0xb6, 0xeb, 0xb5, 0x00, 0x79, 0xc0, 0x49, 0x3d, 0xa4, 0x90, 0x36, 0xd4, 0x3c,
- 0x77, 0xfc, 0xdc, 0xb3, 0x67, 0xac, 0xd5, 0xc0, 0x73, 0x48, 0xc7, 0xe4, 0x13, 0x20, 0x13, 0xe6,
- 0xb0, 0xd0, 0x8e, 0x99, 0x63, 0xb9, 0x0e, 0xf3, 0x62, 0x37, 0x3e, 0x6b, 0xdd, 0x46, 0xd4, 0x7a,
- 0xca, 0x31, 0x24, 0x23, 0x0f, 0x0f, 0x42, 0xff, 0xd4, 0x75, 0x58, 0xd8, 0xba, 0xb3, 0x00, 0x3f,
- 0x94, 0x8c, 0xf6, 0xbf, 0x17, 0xa0, 0x99, 0xd7, 0x05, 0x51, 0xa1, 0x68, 0x07, 0x41, 0x6b, 0x15,
- 0xa5, 0xe4, 0x8f, 0xe4, 0x6d, 0x00, 0x2e, 0x8a, 0x15, 0x05, 0xf6, 0x98, 0xb5, 0x6e, 0xe1, 0x5a,
- 0x75, 0x4e, 0x19, 0x71, 0x02, 0x39, 0x82, 0x46, 0x60, 0xc7, 0x27, 0x6c, 0xca, 0x66, 0xcc, 0x8b,
- 0x5b, 0xcd, 0xcd, 0xe2, 0x16, 0x6c, 0x7f, 0x7e, 0x4d, 0xd5, 0x77, 0x0e, 0xed, 0xf8, 0x44, 0x17,
- 0x53, 0x69, 0x76, 0x9d, 0xb6, 0x0e, 0x8d, 0x0c, 0x8f, 0x10, 0x28, 0xc5, 0x67, 0x01, 0x6b, 0xad,
- 0xa1, 0x5c, 0xf8, 0x4c, 0x9a, 0xa0, 0xb8, 0x4e, 0x4b, 0x45, 0xf3, 0x57, 0x5c, 0x87, 0x63, 0x50,
- 0x87, 0xeb, 0x28, 0x22, 0x3e, 0x6b, 0xff, 0x51, 0x86, 0x5a, 0x22, 0x00, 0xe9, 0x42, 0x75, 0xc6,
- 0x6c, 0xcf, 0xf5, 0x8e, 0xd1, 0x69, 0x9a, 0xdb, 0x6f, 0x2e, 0x11, 0xb3, 0x73, 0x20, 0x20, 0x3b,
- 0x30, 0x18, 0x5a, 0x07, 0x7a, 0x77, 0x60, 0x0c, 0xf6, 0x69, 0x32, 0x8f, 0x1f, 0xa6, 0x7c, 0xb4,
- 0xe6, 0xa1, 0x8b, 0x9e, 0x55, 0xa7, 0x20, 0x49, 0x47, 0xa1, 0x9b, 0x0a, 0x51, 0x14, 0x82, 0xe2,
- 0x21, 0x76, 0xa0, 0x9c, 0xb8, 0x88, 0xb2, 0xd5, 0xd8, 0x6e, 0x5d, 0xa6, 0x1c, 0x2a, 0x60, 0xdc,
- 0x20, 0x66, 0xf3, 0x69, 0xec, 0x06, 0x53, 0xee, 0x76, 0xca, 0x56, 0x8d, 0xa6, 0x63, 0xf2, 0x1e,
- 0x40, 0xc4, 0xec, 0x70, 0x7c, 0x62, 0x3f, 0x9b, 0xb2, 0x56, 0x85, 0x7b, 0xf6, 0x4e, 0x79, 0x62,
- 0x4f, 0x23, 0x46, 0x33, 0x0c, 0x62, 0xc3, 0xdd, 0x49, 0x1c, 0x59, 0xb1, 0xff, 0x9c, 0x79, 0xee,
- 0x2b, 0x9b, 0x07, 0x12, 0xcb, 0x0f, 0xf8, 0x0f, 0xfa, 0x58, 0x73, 0xfb, 0xc3, 0x65, 0x5b, 0x7f,
- 0x14, 0x47, 0x66, 0x66, 0xc6, 0x10, 0x27, 0xd0, 0xdb, 0x93, 0x65, 0x64, 0xd2, 0x86, 0xca, 0xd4,
- 0x1f, 0xdb, 0x53, 0xd6, 0xaa, 0x73, 0x2d, 0xec, 0x28, 0xcc, 0xa3, 0x92, 0xa2, 0xfd, 0xb3, 0x02,
- 0x55, 0xa9, 0x47, 0xd2, 0x84, 0x8c, 0x26, 0xd5, 0x37, 0x48, 0x0d, 0x4a, 0xbb, 0xfd, 0xe1, 0xae,
- 0xda, 0xe4, 0x4f, 0xa6, 0xfe, 0xbd, 0xa9, 0xae, 0x71, 0xcc, 0xee, 0x53, 0x53, 0x1f, 0x99, 0x94,
- 0x63, 0x54, 0xb2, 0x0e, 0xab, 0x5d, 0x73, 0x78, 0x60, 0xed, 0x75, 0x4d, 0x7d, 0x7f, 0x48, 0x9f,
- 0xaa, 0x05, 0xb2, 0x0a, 0x75, 0x24, 0xf5, 0x8d, 0xc1, 0x13, 0x55, 0xe1, 0x33, 0x70, 0x68, 0x1a,
- 0x66, 0x5f, 0x57, 0x8b, 0x44, 0x85, 0x15, 0x31, 0x63, 0x38, 0x30, 0xf5, 0x81, 0xa9, 0x96, 0x52,
- 0xca, 0xe8, 0xe8, 0xe0, 0xa0, 0x4b, 0x9f, 0xaa, 0x65, 0xb2, 0x06, 0x0d, 0xa4, 0x74, 0x8f, 0xcc,
- 0xc7, 0x43, 0xaa, 0x56, 0x48, 0x03, 0xaa, 0xfb, 0x3d, 0xeb, 0xbb, 0xc7, 0xfa, 0x40, 0xad, 0x92,
- 0x15, 0xa8, 0xed, 0xf7, 0x2c, 0xfd, 0xa0, 0x6b, 0xf4, 0xd5, 0x1a, 0x9f, 0xbd, 0xaf, 0x0f, 0xe9,
- 0x68, 0x64, 0x1d, 0x0e, 0x8d, 0x81, 0xa9, 0xd6, 0x49, 0x1d, 0xca, 0xfb, 0x3d, 0xcb, 0x38, 0x50,
- 0x81, 0x10, 0x68, 0xee, 0xf7, 0xac, 0xc3, 0xc7, 0xc3, 0x81, 0x3e, 0x38, 0x3a, 0xd8, 0xd5, 0xa9,
- 0xda, 0x20, 0xb7, 0x40, 0xe5, 0xb4, 0xe1, 0xc8, 0xec, 0xf6, 0xbb, 0xbd, 0x1e, 0xd5, 0x47, 0x23,
- 0x75, 0x85, 0x4b, 0xbd, 0xdf, 0xb3, 0x68, 0xd7, 0xe4, 0xfb, 0x5a, 0xe5, 0x2f, 0xe4, 0x7b, 0x7f,
- 0xa2, 0x3f, 0x55, 0xd7, 0xf9, 0x2b, 0xf4, 0x81, 0x69, 0x98, 0x4f, 0xad, 0x43, 0x3a, 0x34, 0x87,
- 0xea, 0x06, 0x17, 0xd0, 0x18, 0xf4, 0xf4, 0xef, 0xad, 0x6f, 0xbb, 0xfd, 0x23, 0x5d, 0x25, 0xda,
- 0x8f, 0xe1, 0xf6, 0xd2, 0x33, 0xe1, 0xaa, 0x7b, 0x6c, 0x1e, 0xf4, 0xd5, 0x02, 0x7f, 0xe2, 0x9b,
- 0x52, 0x15, 0xed, 0x0f, 0xa0, 0xc4, 0x5d, 0x86, 0x7c, 0x06, 0xd5, 0xc4, 0x1b, 0x0b, 0xe8, 0x8d,
- 0x77, 0xb3, 0x67, 0x6d, 0xc7, 0x27, 0x9d, 0xc4, 0xe3, 0x12, 0x5c, 0xbb, 0x0b, 0xd5, 0x45, 0x4f,
- 0x53, 0x2e, 0x78, 0x5a, 0xf1, 0x82, 0xa7, 0x95, 0x32, 0x9e, 0x66, 0x43, 0x3d, 0xf5, 0xed, 0x9b,
- 0x47, 0x91, 0x07, 0x50, 0xe2, 0xde, 0xdf, 0x6a, 0xa2, 0x87, 0xac, 0x2d, 0x08, 0x4c, 0x91, 0xa9,
- 0xfd, 0x43, 0x01, 0x4a, 0x3c, 0xda, 0x9e, 0x07, 0xda, 0xc2, 0x15, 0x81, 0x56, 0xb9, 0x32, 0xd0,
- 0x16, 0xaf, 0x15, 0x68, 0x2b, 0x37, 0x0b, 0xb4, 0xd5, 0x4b, 0x02, 0xad, 0xf6, 0x67, 0x45, 0x68,
- 0xe8, 0x38, 0xf3, 0x10, 0x13, 0xfd, 0xfb, 0x50, 0x7c, 0xce, 0xce, 0x50, 0x3f, 0x8d, 0xed, 0x5b,
- 0x99, 0xdd, 0xa6, 0x2a, 0xa4, 0x1c, 0x40, 0xb6, 0x61, 0x45, 0xbc, 0xd0, 0x3a, 0x0e, 0xfd, 0x79,
- 0xd0, 0x52, 0x97, 0xab, 0xa7, 0x21, 0x40, 0xfb, 0x1c, 0x43, 0xde, 0x83, 0xb2, 0xff, 0xc2, 0x63,
- 0x21, 0xc6, 0xc1, 0x3c, 0x98, 0x2b, 0x8f, 0x0a, 0x2e, 0x79, 0x08, 0xa5, 0xe7, 0xae, 0xe7, 0xe0,
- 0x19, 0xe6, 0x23, 0x61, 0x46, 0xd0, 0xce, 0x13, 0xd7, 0x73, 0x28, 0x02, 0xc9, 0x3d, 0xa8, 0xf1,
- 0x5f, 0x8c, 0x7b, 0x65, 0xdc, 0x68, 0x95, 0x8f, 0x79, 0xd0, 0x7b, 0x08, 0xb5, 0x40, 0xc6, 0x10,
- 0x4c, 0x00, 0x8d, 0xed, 0x8d, 0x25, 0xe1, 0x85, 0xa6, 0x20, 0xf2, 0x15, 0xac, 0x84, 0xf6, 0x0b,
- 0x2b, 0x9d, 0xb4, 0x76, 0xf9, 0xa4, 0x46, 0x68, 0xbf, 0x48, 0x23, 0x38, 0x81, 0x52, 0x68, 0x7b,
- 0xcf, 0x5b, 0x64, 0xb3, 0xb0, 0x55, 0xa6, 0xf8, 0xac, 0x7d, 0x01, 0x25, 0x2e, 0x25, 0x8f, 0x08,
- 0xfb, 0x3d, 0xf4, 0xff, 0xee, 0x9e, 0xa9, 0x16, 0x12, 0x7f, 0xfe, 0x96, 0x47, 0x03, 0x45, 0x72,
- 0x0f, 0xf4, 0xd1, 0xa8, 0xbb, 0xaf, 0xab, 0x45, 0xad, 0x07, 0xeb, 0x7b, 0xfe, 0x2c, 0xf0, 0x23,
- 0x37, 0x66, 0xe9, 0xf2, 0xf7, 0xa0, 0xe6, 0x7a, 0x0e, 0x7b, 0x69, 0xb9, 0x0e, 0x9a, 0x56, 0x91,
- 0x56, 0x71, 0x6c, 0x38, 0xdc, 0xe4, 0x4e, 0x65, 0x31, 0x55, 0xe4, 0x26, 0x87, 0x03, 0xed, 0x2f,
- 0x15, 0x28, 0x1b, 0x1c, 0xc1, 0x8d, 0x4f, 0x9e, 0x14, 0x7a, 0x8f, 0x30, 0x4c, 0x10, 0x24, 0x93,
- 0xfb, 0x50, 0x1b, 0x6a, 0xb6, 0x37, 0x66, 0xbc, 0xe2, 0xc3, 0x3c, 0x50, 0xa3, 0xe9, 0x98, 0x7c,
- 0x99, 0xd1, 0x9f, 0x82, 0x2e, 0x7b, 0x2f, 0xa3, 0x0a, 0x7c, 0xc1, 0x12, 0x2d, 0xb6, 0xff, 0xaa,
- 0x90, 0x49, 0x6e, 0xcb, 0x12, 0x4f, 0x1f, 0xea, 0x8e, 0x1b, 0x32, 0xac, 0x23, 0xe5, 0x41, 0x3f,
- 0xb8, 0x74, 0xe1, 0x4e, 0x2f, 0x81, 0xee, 0xd4, 0xbb, 0xa3, 0x3d, 0x7d, 0xd0, 0xe3, 0x99, 0xef,
- 0x7c, 0x01, 0xed, 0x23, 0xa8, 0xa7, 0x10, 0x0c, 0xc7, 0x09, 0x48, 0x2d, 0x70, 0xf5, 0xf6, 0xf4,
- 0x74, 0xac, 0x68, 0x7f, 0xad, 0x40, 0x33, 0xd5, 0xaf, 0xd0, 0xd0, 0x6d, 0xa8, 0xd8, 0x41, 0x90,
- 0xa8, 0xb6, 0x4e, 0xcb, 0x76, 0x10, 0x18, 0x8e, 0x8c, 0x2d, 0x0a, 0x6a, 0x9b, 0xc7, 0x96, 0x4f,
- 0x01, 0x1c, 0x36, 0x71, 0x3d, 0x17, 0x85, 0x2e, 0xa2, 0xc1, 0xab, 0x8b, 0x42, 0xd3, 0x0c, 0x86,
- 0x7c, 0x09, 0xe5, 0x28, 0xb6, 0x63, 0x91, 0x2b, 0x9b, 0xdb, 0xf7, 0x33, 0xe0, 0xbc, 0x08, 0x9d,
- 0x11, 0x87, 0x51, 0x81, 0x26, 0x5f, 0xc1, 0x2d, 0xdf, 0x9b, 0x9e, 0x59, 0xf3, 0x88, 0x59, 0xee,
- 0xc4, 0x0a, 0xd9, 0x0f, 0x73, 0x37, 0x64, 0x4e, 0x3e, 0xa7, 0xae, 0x73, 0xc8, 0x51, 0xc4, 0x8c,
- 0x09, 0x95, 0x7c, 0xed, 0x6b, 0x28, 0xe3, 0x3a, 0x7c, 0xcf, 0xdf, 0x51, 0xc3, 0xd4, 0xad, 0xe1,
- 0xa0, 0xff, 0x54, 0xe8, 0x80, 0xea, 0xdd, 0x9e, 0x85, 0x44, 0x55, 0xe1, 0xc1, 0xbe, 0xa7, 0xf7,
- 0x75, 0x53, 0xef, 0xa9, 0x45, 0x9e, 0x3d, 0x74, 0x4a, 0x87, 0x54, 0x2d, 0x69, 0xff, 0x53, 0x80,
- 0x15, 0x94, 0xe7, 0xd0, 0x8f, 0xe2, 0x89, 0xfb, 0x92, 0xec, 0x41, 0x43, 0x98, 0xdd, 0xa9, 0x2c,
- 0xe8, 0xb9, 0x33, 0x68, 0x8b, 0x7b, 0x96, 0x68, 0x31, 0x90, 0x75, 0xb4, 0x9b, 0x3e, 0x27, 0x21,
- 0x45, 0x41, 0xa7, 0xbf, 0x22, 0xa4, 0xbc, 0x05, 0x95, 0x67, 0x6c, 0xe2, 0x87, 0x22, 0x04, 0xd6,
- 0x76, 0x4a, 0x71, 0x38, 0x67, 0x54, 0xd2, 0xda, 0x36, 0xc0, 0xf9, 0xfa, 0xe4, 0x01, 0xac, 0x26,
- 0xc6, 0x66, 0xa1, 0x71, 0x89, 0x93, 0x5b, 0x49, 0x88, 0x83, 0x5c, 0x75, 0xa3, 0x5c, 0xab, 0xba,
- 0xd1, 0xbe, 0x86, 0xd5, 0x64, 0x3f, 0xe2, 0xfc, 0x54, 0x21, 0x79, 0x01, 0x63, 0xca, 0x82, 0x8c,
- 0xca, 0x45, 0x19, 0xb5, 0x9f, 0x41, 0x6d, 0xe4, 0xd9, 0x41, 0x74, 0xe2, 0xc7, 0xdc, 0x7a, 0xe2,
- 0x48, 0xfa, 0xaa, 0x12, 0x47, 0x9a, 0x06, 0x15, 0x7e, 0x38, 0xf3, 0x88, 0xbb, 0xbf, 0x31, 0xe8,
- 0xee, 0x99, 0xc6, 0xb7, 0xba, 0xfa, 0x06, 0x01, 0xa8, 0xc8, 0xe7, 0x82, 0xa6, 0x41, 0xd3, 0x90,
- 0xed, 0xd8, 0x63, 0x66, 0x3b, 0x2c, 0xe4, 0x12, 0xfc, 0xe0, 0x47, 0x89, 0x04, 0x3f, 0xf8, 0x91,
- 0xf6, 0x17, 0x05, 0x68, 0x98, 0xa1, 0xed, 0x45, 0xb6, 0x30, 0xf7, 0xcf, 0xa0, 0x72, 0x82, 0x58,
- 0x74, 0xa3, 0xc6, 0x82, 0x7f, 0x66, 0x17, 0xa3, 0x12, 0x48, 0xee, 0x40, 0xe5, 0xc4, 0xf6, 0x9c,
- 0xa9, 0xd0, 0x5a, 0x85, 0xca, 0x51, 0x92, 0x1b, 0x95, 0xf3, 0xdc, 0xb8, 0x05, 0x2b, 0x33, 0x3b,
- 0x7c, 0x6e, 0x8d, 0x4f, 0x6c, 0xef, 0x98, 0x45, 0xf2, 0x60, 0xa4, 0x05, 0x36, 0x38, 0x6b, 0x4f,
- 0x70, 0xb4, 0xbf, 0x5f, 0x81, 0xf2, 0x37, 0x73, 0x16, 0x9e, 0x65, 0x04, 0xfa, 0xe0, 0xba, 0x02,
- 0xc9, 0x17, 0x17, 0x2e, 0x4b, 0xca, 0x6f, 0x2f, 0x26, 0x65, 0x22, 0x53, 0x84, 0xc8, 0x95, 0x22,
- 0x0b, 0x7c, 0x9a, 0x09, 0x63, 0xeb, 0x57, 0xd8, 0xda, 0x79, 0x70, 0x7b, 0x08, 0x95, 0x89, 0x3b,
- 0x8d, 0x51, 0x75, 0x8b, 0xd5, 0x08, 0xee, 0xa5, 0xf3, 0x08, 0xd9, 0x54, 0xc2, 0xc8, 0xbb, 0xb0,
- 0x22, 0x2a, 0x59, 0xeb, 0x07, 0xce, 0xc6, 0x82, 0x95, 0xf7, 0xa6, 0x48, 0x13, 0xbb, 0xff, 0x18,
- 0xca, 0x7e, 0xc8, 0x37, 0x5f, 0xc7, 0x25, 0xef, 0x5c, 0x58, 0x72, 0xc8, 0xb9, 0x54, 0x80, 0xc8,
- 0x87, 0x50, 0x3a, 0x71, 0xbd, 0x18, 0xb3, 0x46, 0x73, 0xfb, 0xf6, 0x05, 0xf0, 0x63, 0xd7, 0x8b,
- 0x29, 0x42, 0x78, 0x98, 0x1f, 0xfb, 0x73, 0x2f, 0x6e, 0xdd, 0xc5, 0x0c, 0x23, 0x06, 0xe4, 0x1e,
- 0x54, 0xfc, 0xc9, 0x24, 0x62, 0x31, 0x76, 0x96, 0xe5, 0x9d, 0xc2, 0xa7, 0x54, 0x12, 0xf8, 0x84,
- 0xa9, 0x3b, 0x73, 0x63, 0xec, 0x43, 0xca, 0x54, 0x0c, 0xc8, 0x2e, 0xac, 0x8d, 0xfd, 0x59, 0xe0,
- 0x4e, 0x99, 0x63, 0x8d, 0xe7, 0x61, 0xe4, 0x87, 0xad, 0x77, 0x2e, 0x1c, 0xd3, 0x9e, 0x44, 0xec,
- 0x21, 0x80, 0x36, 0xc7, 0xb9, 0x31, 0x31, 0x60, 0x83, 0x79, 0x8e, 0xb5, 0xb8, 0xce, 0xfd, 0xd7,
- 0xad, 0xb3, 0xce, 0x3c, 0x27, 0x4f, 0x4a, 0xc4, 0xc1, 0x48, 0x68, 0x61, 0xcc, 0x68, 0x6d, 0x60,
- 0x90, 0xb9, 0x77, 0x69, 0xac, 0x14, 0xe2, 0x64, 0xc2, 0xf7, 0x6f, 0xc0, 0x2d, 0x19, 0x22, 0xad,
- 0x80, 0x85, 0x13, 0x36, 0x8e, 0xad, 0x60, 0x6a, 0x7b, 0x58, 0xca, 0xa5, 0xc6, 0x4a, 0x24, 0xe4,
- 0x50, 0x20, 0x0e, 0xa7, 0xb6, 0x47, 0x34, 0xa8, 0x3f, 0x67, 0x67, 0x91, 0xc5, 0x23, 0x29, 0x76,
- 0xae, 0x29, 0xba, 0xc6, 0xe9, 0x43, 0x6f, 0x7a, 0x46, 0x7e, 0x02, 0x8d, 0xf8, 0xdc, 0xdb, 0xb0,
- 0x61, 0x6d, 0xe4, 0x4e, 0x35, 0xe3, 0x8b, 0x34, 0x0b, 0x25, 0xf7, 0xa1, 0x2a, 0x35, 0xd4, 0xba,
- 0x97, 0x5d, 0x3b, 0xa1, 0xf2, 0xc4, 0x3c, 0xb1, 0xdd, 0xa9, 0x7f, 0xca, 0x42, 0x6b, 0x16, 0xb5,
- 0xda, 0xe2, 0xb6, 0x24, 0x21, 0x1d, 0x44, 0xdc, 0x4f, 0xa3, 0x38, 0xf4, 0xbd, 0xe3, 0xd6, 0x26,
- 0xde, 0x93, 0xc8, 0xd1, 0xc5, 0xe0, 0xf7, 0x2e, 0x66, 0xfe, 0x7c, 0xf0, 0xfb, 0x1c, 0xee, 0x60,
- 0x65, 0x66, 0x3d, 0x3b, 0xb3, 0xf2, 0x68, 0x0d, 0xd1, 0x1b, 0xc8, 0xdd, 0x3d, 0x3b, 0xcc, 0x4e,
- 0x6a, 0x43, 0xcd, 0x71, 0xa3, 0xd8, 0xf5, 0xc6, 0x71, 0xab, 0x85, 0xef, 0x4c, 0xc7, 0xe4, 0x33,
- 0xb8, 0x3d, 0x73, 0x3d, 0x2b, 0xb2, 0x27, 0xcc, 0x8a, 0x5d, 0xee, 0x9b, 0x6c, 0xec, 0x7b, 0x4e,
- 0xd4, 0x7a, 0x80, 0x82, 0x93, 0x99, 0xeb, 0x8d, 0xec, 0x09, 0x33, 0xdd, 0x19, 0x1b, 0x09, 0x0e,
- 0xf9, 0x08, 0xd6, 0x11, 0x1e, 0xb2, 0x60, 0xea, 0x8e, 0x6d, 0xf1, 0xfa, 0x1f, 0xe1, 0xeb, 0xd7,
- 0x38, 0x83, 0x0a, 0x3a, 0xbe, 0xfa, 0x63, 0x68, 0x06, 0x2c, 0x8c, 0xdc, 0x28, 0xb6, 0xa4, 0x45,
- 0xbf, 0x97, 0xd5, 0xda, 0xaa, 0x64, 0x0e, 0x91, 0xd7, 0xfe, 0xcf, 0x02, 0x54, 0x84, 0x73, 0x92,
- 0x4f, 0x41, 0xf1, 0x03, 0xbc, 0x06, 0x69, 0x6e, 0x6f, 0x5e, 0xe2, 0xc1, 0x9d, 0x61, 0xc0, 0xeb,
- 0x5e, 0x3f, 0xa4, 0x8a, 0x1f, 0xdc, 0xb8, 0x28, 0xd4, 0xfe, 0x10, 0x6a, 0xc9, 0x02, 0xbc, 0xbc,
- 0xe8, 0xeb, 0xa3, 0x91, 0x65, 0x3e, 0xee, 0x0e, 0xd4, 0x02, 0xb9, 0x03, 0x24, 0x1d, 0x5a, 0x43,
- 0x6a, 0xe9, 0xdf, 0x1c, 0x75, 0xfb, 0xaa, 0x82, 0x5d, 0x1a, 0xd5, 0xbb, 0xa6, 0x4e, 0x05, 0xb2,
- 0x48, 0xee, 0xc1, 0xed, 0x2c, 0xe5, 0x1c, 0x5c, 0xc2, 0x14, 0x8c, 0x8f, 0x65, 0x52, 0x01, 0xc5,
- 0x18, 0xa8, 0x15, 0x9e, 0x16, 0xf4, 0xef, 0x8d, 0x91, 0x39, 0x52, 0xab, 0xed, 0xbf, 0x29, 0x40,
- 0x19, 0xc3, 0x06, 0x3f, 0x9f, 0x54, 0x72, 0x71, 0x5d, 0x73, 0x5e, 0xb9, 0x1a, 0xd9, 0x92, 0xaa,
- 0x81, 0x01, 0x65, 0x73, 0x79, 0xf4, 0xf9, 0xb5, 0xd6, 0x53, 0x3f, 0x85, 0x12, 0x8f, 0x52, 0xbc,
- 0x43, 0x1c, 0xd2, 0x9e, 0x4e, 0xad, 0x47, 0x06, 0x1d, 0xf1, 0x2a, 0x97, 0x40, 0xb3, 0x3b, 0xd8,
- 0xd3, 0x47, 0xe6, 0x30, 0xa1, 0xa1, 0x56, 0x1e, 0x19, 0x7d, 0x33, 0x45, 0x15, 0xb5, 0x9f, 0xd7,
- 0x60, 0x35, 0x89, 0x09, 0x22, 0x82, 0x3e, 0x82, 0x46, 0x10, 0xba, 0x33, 0x3b, 0x3c, 0x8b, 0xc6,
- 0xb6, 0x87, 0x49, 0x01, 0xb6, 0x7f, 0xb4, 0x24, 0xaa, 0x88, 0x1d, 0x1d, 0x0a, 0xec, 0x68, 0x6c,
- 0x7b, 0x34, 0x3b, 0x91, 0xf4, 0x61, 0x75, 0xc6, 0xc2, 0x63, 0xf6, 0x7b, 0xbe, 0xeb, 0xe1, 0x4a,
- 0x55, 0x8c, 0xc8, 0xef, 0x5f, 0xba, 0xd2, 0x01, 0x47, 0xff, 0x8e, 0xef, 0x7a, 0xb8, 0x56, 0x7e,
- 0x32, 0xf9, 0x04, 0xea, 0xa2, 0x12, 0x72, 0xd8, 0x04, 0x63, 0xc5, 0xb2, 0xda, 0x4f, 0xd4, 0xe8,
- 0x3d, 0x36, 0xc9, 0xc4, 0x65, 0xb8, 0x34, 0x2e, 0x37, 0xb2, 0x71, 0xf9, 0xcd, 0x6c, 0x2c, 0x5a,
- 0x11, 0x55, 0x78, 0x1a, 0x84, 0x2e, 0x38, 0x7c, 0x6b, 0x89, 0xc3, 0x77, 0x60, 0x23, 0xf1, 0x55,
- 0xcb, 0xf5, 0x26, 0xee, 0x4b, 0x2b, 0x72, 0x5f, 0x89, 0xd8, 0x53, 0xa6, 0xeb, 0x09, 0xcb, 0xe0,
- 0x9c, 0x91, 0xfb, 0x8a, 0x11, 0x23, 0xe9, 0xe0, 0x64, 0x0e, 0x5c, 0xc5, 0xab, 0xc9, 0xf7, 0x2e,
- 0x55, 0x8f, 0x68, 0xbe, 0x64, 0x46, 0xcc, 0x4d, 0x6d, 0xff, 0x52, 0x81, 0x46, 0xe6, 0x1c, 0x78,
- 0xf6, 0x16, 0xca, 0x42, 0x61, 0xc5, 0x55, 0x94, 0x50, 0x1f, 0x4a, 0xfa, 0x26, 0xd4, 0xa3, 0xd8,
- 0x0e, 0x63, 0x8b, 0x17, 0x57, 0xb2, 0xdd, 0x45, 0xc2, 0x13, 0x76, 0x46, 0x3e, 0x80, 0x35, 0xc1,
- 0x74, 0xbd, 0xf1, 0x74, 0x1e, 0xb9, 0xa7, 0xa2, 0x99, 0xaf, 0xd1, 0x26, 0x92, 0x8d, 0x84, 0x4a,
- 0xee, 0x42, 0x95, 0x67, 0x21, 0xbe, 0x86, 0x68, 0xfa, 0x2a, 0xcc, 0x73, 0xf8, 0x0a, 0x0f, 0x60,
- 0x95, 0x33, 0xce, 0xe7, 0x57, 0xc4, 0x2d, 0x33, 0xf3, 0x9c, 0xf3, 0xd9, 0x1d, 0xd8, 0x10, 0xaf,
- 0x09, 0x44, 0xf1, 0x2a, 0x2b, 0xdc, 0x3b, 0xa8, 0xd8, 0x75, 0x64, 0xc9, 0xb2, 0x56, 0x14, 0x9c,
- 0x1f, 0x01, 0xcf, 0x5e, 0x0b, 0xe8, 0xbb, 0x22, 0x94, 0x31, 0xcf, 0xc9, 0x61, 0x77, 0xe1, 0x1d,
- 0x8e, 0x9d, 0x7b, 0x76, 0x10, 0x4c, 0x5d, 0xe6, 0x58, 0x53, 0xff, 0x18, 0x43, 0x66, 0x14, 0xdb,
- 0xb3, 0xc0, 0x9a, 0x47, 0xad, 0x0d, 0x0c, 0x99, 0x6d, 0xe6, 0x39, 0x47, 0x09, 0xa8, 0xef, 0x1f,
- 0x9b, 0x09, 0xe4, 0x28, 0x6a, 0xff, 0x3e, 0xac, 0xe6, 0xec, 0x71, 0x41, 0xa7, 0x35, 0x74, 0xfe,
- 0x8c, 0x4e, 0xdf, 0x85, 0x95, 0x20, 0x64, 0xe7, 0xa2, 0xd5, 0x51, 0xb4, 0x86, 0xa0, 0x09, 0xb1,
- 0xb6, 0x60, 0x05, 0x79, 0x96, 0x20, 0xe6, 0xf3, 0x63, 0x03, 0x59, 0x87, 0xc8, 0x69, 0xbf, 0x80,
- 0x95, 0xec, 0x69, 0x93, 0x77, 0x33, 0x69, 0xa1, 0x99, 0xcb, 0x93, 0x69, 0x76, 0x48, 0x2a, 0xb2,
- 0xf5, 0x4b, 0x2a, 0x32, 0x72, 0x9d, 0x8a, 0x4c, 0xfb, 0x2f, 0xd9, 0x9c, 0x65, 0x2a, 0x84, 0x9f,
- 0x41, 0x2d, 0x90, 0xf5, 0x38, 0x5a, 0x52, 0xfe, 0x12, 0x3e, 0x0f, 0xee, 0x24, 0x95, 0x3b, 0x4d,
- 0xe7, 0xb4, 0xff, 0x56, 0x81, 0x5a, 0x5a, 0xd0, 0xe7, 0x2c, 0xef, 0xcd, 0x05, 0xcb, 0x3b, 0x90,
- 0x1a, 0x16, 0x0a, 0x7c, 0x1b, 0xa3, 0xc5, 0x27, 0xaf, 0x7f, 0xd7, 0xc5, 0xb6, 0xe7, 0x34, 0xdb,
- 0xf6, 0x6c, 0xbe, 0xae, 0xed, 0xf9, 0xe4, 0xa2, 0xc1, 0xbf, 0x95, 0xe9, 0x2d, 0x16, 0xcc, 0xbe,
- 0xfd, 0x7d, 0xae, 0x0f, 0xca, 0x26, 0x84, 0x77, 0xc4, 0x7e, 0xd2, 0x84, 0x90, 0xb6, 0x3f, 0xf7,
- 0xaf, 0xd7, 0xfe, 0x6c, 0x43, 0x45, 0xea, 0xfc, 0x0e, 0x54, 0x64, 0x4d, 0x27, 0x1b, 0x04, 0x31,
- 0x3a, 0x6f, 0x10, 0x0a, 0xb2, 0x4e, 0xd7, 0x7e, 0xae, 0x40, 0x59, 0x0f, 0x43, 0x3f, 0xd4, 0xfe,
- 0x48, 0x81, 0x3a, 0x3e, 0xed, 0xf9, 0x0e, 0xe3, 0xd9, 0x60, 0xb7, 0xdb, 0xb3, 0xa8, 0xfe, 0xcd,
- 0x91, 0x8e, 0xd9, 0xa0, 0x0d, 0x77, 0xf6, 0x86, 0x83, 0xbd, 0x23, 0x4a, 0xf5, 0x81, 0x69, 0x99,
- 0xb4, 0x3b, 0x18, 0xf1, 0xb6, 0x67, 0x38, 0x50, 0x15, 0x9e, 0x29, 0x8c, 0x81, 0xa9, 0xd3, 0x41,
- 0xb7, 0x6f, 0x89, 0x56, 0xb4, 0x88, 0x77, 0xb3, 0xba, 0xde, 0xb3, 0xf0, 0xd6, 0x51, 0x2d, 0xf1,
- 0x96, 0xd5, 0x34, 0x0e, 0xf4, 0xe1, 0x91, 0xa9, 0x96, 0xc9, 0x6d, 0x58, 0x3f, 0xd4, 0xe9, 0x81,
- 0x31, 0x1a, 0x19, 0xc3, 0x81, 0xd5, 0xd3, 0x07, 0x86, 0xde, 0x53, 0x2b, 0x7c, 0x9d, 0x5d, 0x63,
- 0xdf, 0xec, 0xee, 0xf6, 0x75, 0xb9, 0x4e, 0x95, 0x6c, 0xc2, 0x5b, 0x7b, 0xc3, 0x83, 0x03, 0xc3,
- 0x34, 0xf5, 0x9e, 0xb5, 0x7b, 0x64, 0x5a, 0x23, 0xd3, 0xe8, 0xf7, 0xad, 0xee, 0xe1, 0x61, 0xff,
- 0x29, 0x4f, 0x60, 0x35, 0x72, 0x17, 0x36, 0xf6, 0xba, 0x87, 0xdd, 0x5d, 0xa3, 0x6f, 0x98, 0x4f,
- 0xad, 0x9e, 0x31, 0xe2, 0xf3, 0x7b, 0x6a, 0x9d, 0x27, 0x6c, 0x93, 0x3e, 0xb5, 0xba, 0x7d, 0x14,
- 0xcd, 0xd4, 0xad, 0xdd, 0xee, 0xde, 0x13, 0x7d, 0xd0, 0x53, 0x81, 0x0b, 0x30, 0xea, 0x3e, 0xd2,
- 0x2d, 0x2e, 0x92, 0x65, 0x0e, 0x87, 0xd6, 0xb0, 0xdf, 0x53, 0x1b, 0xda, 0xbf, 0x14, 0xa1, 0xb4,
- 0xe7, 0x47, 0x31, 0xf7, 0x46, 0xe1, 0xac, 0x2f, 0x42, 0x37, 0x66, 0xa2, 0x7f, 0x2b, 0x53, 0xd1,
- 0x4b, 0x7f, 0x87, 0x24, 0x1e, 0x50, 0x32, 0x10, 0xeb, 0xd9, 0x19, 0xc7, 0x29, 0x88, 0x5b, 0x3b,
- 0xc7, 0xed, 0x72, 0xb2, 0x88, 0x68, 0x78, 0x85, 0x23, 0xd7, 0x2b, 0x22, 0x4e, 0x06, 0x61, 0xb9,
- 0xe0, 0xc7, 0x40, 0xb2, 0x20, 0xb9, 0x62, 0x09, 0x91, 0x6a, 0x06, 0x29, 0x96, 0xdc, 0x01, 0x18,
- 0xfb, 0xb3, 0x99, 0x1b, 0x8f, 0xfd, 0x28, 0x96, 0x5f, 0xc8, 0xda, 0x39, 0x63, 0x8f, 0x62, 0x6e,
- 0xf1, 0x33, 0x37, 0xe6, 0x8f, 0x34, 0x83, 0x26, 0x3b, 0x70, 0xcf, 0x0e, 0x82, 0xd0, 0x7f, 0xe9,
- 0xce, 0xec, 0x98, 0x59, 0xdc, 0x73, 0xed, 0x63, 0x66, 0x39, 0x6c, 0x1a, 0xdb, 0xd8, 0x13, 0x95,
- 0xe9, 0xdd, 0x0c, 0x60, 0x24, 0xf8, 0x3d, 0xce, 0xe6, 0x71, 0xd7, 0x75, 0xac, 0x88, 0xfd, 0x30,
- 0xe7, 0x1e, 0x60, 0xcd, 0x03, 0xc7, 0xe6, 0x62, 0xd6, 0x45, 0x96, 0x72, 0x9d, 0x91, 0xe4, 0x1c,
- 0x09, 0x46, 0xfb, 0x15, 0xc0, 0xb9, 0x14, 0x64, 0x1b, 0x6e, 0xf3, 0x3a, 0x9e, 0x45, 0x31, 0x73,
- 0x2c, 0xb9, 0xdb, 0x60, 0x1e, 0x47, 0x18, 0xe2, 0xcb, 0x74, 0x23, 0x65, 0xca, 0x9b, 0xc2, 0x79,
- 0x1c, 0x91, 0x9f, 0x40, 0xeb, 0xc2, 0x1c, 0x87, 0x4d, 0x19, 0x7f, 0x6d, 0x15, 0xa7, 0xdd, 0x59,
- 0x98, 0xd6, 0x13, 0x5c, 0xed, 0x4f, 0x14, 0x80, 0x7d, 0x16, 0x53, 0xc1, 0xcd, 0x34, 0xb6, 0x95,
- 0xeb, 0x36, 0xb6, 0xef, 0x27, 0x17, 0x08, 0xc5, 0xab, 0x63, 0xc0, 0x42, 0x97, 0xa1, 0xdc, 0xa4,
- 0xcb, 0xc8, 0x35, 0x11, 0xc5, 0x2b, 0x9a, 0x88, 0x52, 0xae, 0x89, 0xf8, 0x18, 0x9a, 0xf6, 0x74,
- 0xea, 0xbf, 0xe0, 0x05, 0x0d, 0x0b, 0x43, 0xe6, 0xa0, 0x11, 0x9c, 0xd7, 0xdb, 0xc8, 0xec, 0x49,
- 0x9e, 0xf6, 0xe7, 0x0a, 0x34, 0x50, 0x15, 0x51, 0xe0, 0x7b, 0x11, 0x23, 0x5f, 0x42, 0x45, 0x5e,
- 0x44, 0x8b, 0x8b, 0xfc, 0xb7, 0x33, 0xb2, 0x66, 0x70, 0xb2, 0x68, 0xa0, 0x12, 0xcc, 0x33, 0x42,
- 0xe6, 0x75, 0x97, 0x2b, 0x25, 0x45, 0x91, 0xfb, 0x50, 0x73, 0x3d, 0x4b, 0xb4, 0xd4, 0x95, 0x4c,
- 0x58, 0xac, 0xba, 0x1e, 0xd6, 0xb2, 0xed, 0x57, 0x50, 0x11, 0x2f, 0x21, 0x9d, 0x54, 0xa6, 0x8b,
- 0xfa, 0xcb, 0xdc, 0x1c, 0xa7, 0xc2, 0xc8, 0xc3, 0x29, 0xbd, 0x2e, 0x40, 0xb7, 0xa0, 0x7a, 0xca,
- 0x9b, 0x0f, 0xbc, 0xf4, 0xe3, 0xea, 0x4d, 0x86, 0xda, 0x1f, 0x97, 0x00, 0x0e, 0xe7, 0x4b, 0x0c,
- 0xa4, 0x71, 0x5d, 0x03, 0xe9, 0xe4, 0xf4, 0xf8, 0x7a, 0x99, 0x7f, 0x75, 0x43, 0x59, 0xd2, 0x69,
- 0x17, 0x6f, 0xda, 0x69, 0xdf, 0x87, 0x6a, 0x1c, 0xce, 0xb9, 0xa3, 0x08, 0x63, 0x4a, 0x5b, 0x5a,
- 0x49, 0x25, 0x6f, 0x42, 0x79, 0xe2, 0x87, 0x63, 0x86, 0x8e, 0x95, 0xb2, 0x05, 0xed, 0xc2, 0x65,
- 0x52, 0xed, 0xb2, 0xcb, 0x24, 0xde, 0xa0, 0x45, 0xf2, 0x1e, 0x0d, 0x0b, 0x99, 0x7c, 0x83, 0x96,
- 0x5c, 0xb1, 0xd1, 0x14, 0x44, 0xbe, 0x81, 0xa6, 0x3d, 0x8f, 0x7d, 0xcb, 0xe5, 0x15, 0xda, 0xd4,
- 0x1d, 0x9f, 0x61, 0xd9, 0xdd, 0xcc, 0x7f, 0xaf, 0x4f, 0x0f, 0xaa, 0xd3, 0x9d, 0xc7, 0xbe, 0xe1,
- 0x1c, 0x22, 0x72, 0xa7, 0x2a, 0x93, 0x12, 0x5d, 0xb1, 0x33, 0x64, 0xed, 0xc7, 0xb0, 0x92, 0x85,
- 0xf1, 0x04, 0x24, 0x81, 0xea, 0x1b, 0x3c, 0x3b, 0x8d, 0x78, 0x6a, 0x1b, 0x98, 0x46, 0xb7, 0xaf,
- 0x16, 0xb4, 0x18, 0x1a, 0xb8, 0xbc, 0xf4, 0x8e, 0xeb, 0xba, 0xfd, 0x03, 0x28, 0x61, 0xf8, 0x55,
- 0x2e, 0x7c, 0x0f, 0xc1, 0x98, 0x8b, 0xcc, 0xbc, 0xf9, 0x15, 0xb3, 0xe6, 0xf7, 0xdf, 0x05, 0x58,
- 0x31, 0xfd, 0xf9, 0xf8, 0xe4, 0xa2, 0x01, 0xc2, 0xaf, 0x3b, 0x42, 0x2d, 0x31, 0x1f, 0xe5, 0xa6,
- 0xe6, 0x93, 0x5a, 0x47, 0x71, 0x89, 0x75, 0xdc, 0xf4, 0xcc, 0xb5, 0x2f, 0x60, 0x55, 0x6e, 0x5e,
- 0x6a, 0x3d, 0xd1, 0x66, 0xe1, 0x0a, 0x6d, 0x6a, 0xbf, 0x50, 0x60, 0x55, 0xc4, 0xf7, 0xff, 0xbb,
- 0xd2, 0x2a, 0x37, 0x0c, 0xeb, 0xe5, 0x1b, 0x5d, 0x1e, 0xfd, 0xbf, 0xf4, 0x34, 0x6d, 0x08, 0xcd,
- 0x44, 0x7d, 0x37, 0x50, 0xfb, 0x15, 0x46, 0xfc, 0x8b, 0x02, 0x34, 0x06, 0xec, 0xe5, 0x92, 0x20,
- 0x5a, 0xbe, 0xee, 0x71, 0x7c, 0x98, 0x2b, 0x57, 0x1b, 0xdb, 0xeb, 0x59, 0x19, 0xc4, 0xd5, 0x63,
- 0x52, 0xc1, 0xa6, 0xb7, 0xa8, 0xca, 0xf2, 0x5b, 0xd4, 0xd2, 0x62, 0xb7, 0x9e, 0xb9, 0xc5, 0x2b,
- 0x2e, 0xbb, 0xc5, 0xd3, 0xfe, 0xad, 0x08, 0x0d, 0x6c, 0x90, 0x29, 0x8b, 0xe6, 0xd3, 0x38, 0x27,
- 0x4c, 0xe1, 0x6a, 0x61, 0x3a, 0x50, 0x09, 0x71, 0x92, 0x74, 0xa5, 0x4b, 0x83, 0xbf, 0x40, 0x61,
- 0x6b, 0xfc, 0xdc, 0x0d, 0x02, 0xe6, 0x58, 0x82, 0x92, 0x14, 0x30, 0x4d, 0x49, 0x16, 0x22, 0x44,
- 0xbc, 0xfc, 0x9c, 0xf9, 0x21, 0x4b, 0x51, 0x45, 0xbc, 0x4f, 0x68, 0x70, 0x5a, 0x02, 0xc9, 0xdd,
- 0x37, 0x88, 0xca, 0xe0, 0xfc, 0xbe, 0x21, 0xed, 0x35, 0x91, 0x5b, 0x47, 0xae, 0xe8, 0x35, 0x91,
- 0xcd, 0xbb, 0xa8, 0x99, 0x3d, 0x9d, 0x5a, 0x7e, 0x10, 0xa1, 0xd3, 0xd4, 0x68, 0x0d, 0x09, 0xc3,
- 0x20, 0x22, 0x5f, 0x43, 0x7a, 0x5d, 0x2c, 0x6f, 0xc9, 0xc5, 0x39, 0xb6, 0x2e, 0xbb, 0x58, 0xa0,
- 0xab, 0xe3, 0xdc, 0xfd, 0xcf, 0x92, 0x1b, 0xea, 0xca, 0x4d, 0x6f, 0xa8, 0x1f, 0x42, 0x59, 0xc4,
- 0xa8, 0xda, 0xeb, 0x62, 0x94, 0xc0, 0x65, 0xed, 0xb3, 0x91, 0xb7, 0xcf, 0x5f, 0x16, 0x80, 0x74,
- 0xa7, 0x53, 0x7f, 0x6c, 0xc7, 0xcc, 0x70, 0xa2, 0x8b, 0x66, 0x7a, 0xed, 0xcf, 0x2e, 0x9f, 0x41,
- 0x7d, 0xe6, 0x3b, 0x6c, 0x6a, 0x25, 0xdf, 0x94, 0x2e, 0xad, 0x7e, 0x10, 0xc6, 0x5b, 0x52, 0x02,
- 0x25, 0xbc, 0xc4, 0x51, 0xb0, 0xee, 0xc0, 0x67, 0xde, 0x84, 0xcd, 0xec, 0x97, 0xb2, 0x14, 0xe1,
- 0x8f, 0xa4, 0x03, 0xd5, 0x90, 0x45, 0x2c, 0x3c, 0x65, 0x57, 0x16, 0x55, 0x09, 0x48, 0x7b, 0x06,
- 0x1b, 0xb9, 0x1d, 0x49, 0x47, 0xbe, 0x85, 0x5f, 0x2b, 0xc3, 0x58, 0x7e, 0xb4, 0x12, 0x03, 0xfe,
- 0x3a, 0xe6, 0x25, 0x9f, 0x41, 0xf9, 0x63, 0xea, 0xf0, 0xc5, 0xab, 0xe2, 0xec, 0x1e, 0xa8, 0x59,
- 0x4d, 0xbb, 0x63, 0x0c, 0x36, 0xf2, 0x54, 0x0a, 0xd7, 0x3b, 0x15, 0xed, 0xef, 0x0a, 0xb0, 0xde,
- 0x75, 0x1c, 0xf1, 0x77, 0xc3, 0x25, 0xaa, 0x2f, 0x5e, 0x57, 0xf5, 0x0b, 0x81, 0x58, 0x84, 0x89,
- 0x6b, 0x05, 0xe2, 0x0f, 0xa1, 0x92, 0xd6, 0x5a, 0xc5, 0x05, 0x77, 0x16, 0x72, 0x51, 0x09, 0xd0,
- 0x6e, 0x01, 0xc9, 0x0a, 0x2b, 0xb4, 0xaa, 0xfd, 0x69, 0x11, 0xee, 0xee, 0xb2, 0x63, 0xd7, 0xcb,
- 0xbe, 0xe2, 0x57, 0xdf, 0xc9, 0xc5, 0x4f, 0x65, 0x9f, 0xc1, 0xba, 0x28, 0xe4, 0x93, 0x7f, 0x62,
- 0x59, 0xec, 0x58, 0x7e, 0x9d, 0x94, 0xb1, 0x6a, 0x0d, 0xf9, 0x07, 0x92, 0xad, 0xe3, 0x7f, 0xc5,
- 0x1c, 0x3b, 0xb6, 0x9f, 0xd9, 0x11, 0xb3, 0x5c, 0x47, 0xfe, 0x59, 0x06, 0x12, 0x92, 0xe1, 0x90,
- 0x21, 0x94, 0xb8, 0x0d, 0xa2, 0xeb, 0x36, 0xb7, 0xb7, 0x33, 0x62, 0x5d, 0xb2, 0x95, 0xac, 0x02,
- 0x0f, 0x7c, 0x87, 0xed, 0x54, 0x8f, 0x06, 0x4f, 0x06, 0xc3, 0xef, 0x06, 0x14, 0x17, 0x22, 0x06,
- 0xdc, 0x0a, 0x42, 0x76, 0xea, 0xfa, 0xf3, 0xc8, 0xca, 0x9e, 0x44, 0xf5, 0xca, 0x94, 0xb8, 0x91,
- 0xcc, 0xc9, 0x10, 0xb5, 0x9f, 0xc2, 0xda, 0xc2, 0xcb, 0x78, 0x6d, 0x26, 0x5f, 0xa7, 0xbe, 0x41,
- 0x56, 0xa1, 0x8e, 0x1f, 0xbb, 0x97, 0x7f, 0xfb, 0xd6, 0xfe, 0xb5, 0x80, 0x57, 0x4c, 0x33, 0x37,
- 0xbe, 0x59, 0x06, 0xfb, 0xcd, 0x7c, 0x06, 0x83, 0xed, 0x77, 0xf3, 0xe6, 0x9b, 0x59, 0xb0, 0xf3,
- 0xad, 0x00, 0xa6, 0x41, 0xa4, 0x6d, 0x43, 0x55, 0xd2, 0xc8, 0x6f, 0xc1, 0x5a, 0xe8, 0xfb, 0x71,
- 0xd2, 0x89, 0x8a, 0x0e, 0xe4, 0xf2, 0x3f, 0xdb, 0xac, 0x72, 0xb0, 0x48, 0x06, 0x4f, 0xf2, 0xbd,
- 0x48, 0x59, 0xfc, 0x0d, 0x44, 0x0e, 0x77, 0x1b, 0xbf, 0x5b, 0x4f, 0xff, 0xb7, 0xfb, 0xbf, 0x01,
- 0x00, 0x00, 0xff, 0xff, 0x35, 0x9f, 0x30, 0x98, 0xf2, 0x2b, 0x00, 0x00,
-}
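
The generated accessors removed above all follow the same proto2 convention: every optional field is stored as a pointer, and each Get method nil-checks the receiver and the field before dereferencing, returning the declared default otherwise. A minimal sketch of that pattern, using a hypothetical message type rather than the real generated one:

    package main

    import "fmt"

    // ExampleMsg is a hypothetical stand-in for a generated proto2 message.
    type ExampleMsg struct {
    	App             *string // optional string app
    	AllowMultipleEg *bool   // optional bool allow_multiple_eg [default = false]
    }

    const defaultAllowMultipleEg = false

    // GetApp mirrors the generated accessors: safe on a nil receiver, empty string when unset.
    func (m *ExampleMsg) GetApp() string {
    	if m != nil && m.App != nil {
    		return *m.App
    	}
    	return ""
    }

    // GetAllowMultipleEg returns the proto-declared default when the field is unset.
    func (m *ExampleMsg) GetAllowMultipleEg() bool {
    	if m != nil && m.AllowMultipleEg != nil {
    		return *m.AllowMultipleEg
    	}
    	return defaultAllowMultipleEg
    }

    func main() {
    	var m *ExampleMsg // nil is fine: the accessors never dereference it
    	fmt.Println(m.GetApp(), m.GetAllowMultipleEg())
    }
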
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
deleted file mode 100644
index 497b4d9a9..000000000
--- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
+++ /dev/null
@@ -1,551 +0,0 @@
-syntax = "proto2";
-option go_package = "datastore";
-
-package appengine;
-
-message Action{}
-
-message PropertyValue {
- optional int64 int64Value = 1;
- optional bool booleanValue = 2;
- optional string stringValue = 3;
- optional double doubleValue = 4;
-
- optional group PointValue = 5 {
- required double x = 6;
- required double y = 7;
- }
-
- optional group UserValue = 8 {
- required string email = 9;
- required string auth_domain = 10;
- optional string nickname = 11;
- optional string federated_identity = 21;
- optional string federated_provider = 22;
- }
-
- optional group ReferenceValue = 12 {
- required string app = 13;
- optional string name_space = 20;
- repeated group PathElement = 14 {
- required string type = 15;
- optional int64 id = 16;
- optional string name = 17;
- }
- }
-}
-
-message Property {
- enum Meaning {
- NO_MEANING = 0;
- BLOB = 14;
- TEXT = 15;
- BYTESTRING = 16;
-
- ATOM_CATEGORY = 1;
- ATOM_LINK = 2;
- ATOM_TITLE = 3;
- ATOM_CONTENT = 4;
- ATOM_SUMMARY = 5;
- ATOM_AUTHOR = 6;
-
- GD_WHEN = 7;
- GD_EMAIL = 8;
- GEORSS_POINT = 9;
- GD_IM = 10;
-
- GD_PHONENUMBER = 11;
- GD_POSTALADDRESS = 12;
-
- GD_RATING = 13;
-
- BLOBKEY = 17;
- ENTITY_PROTO = 19;
-
- INDEX_VALUE = 18;
- };
-
- optional Meaning meaning = 1 [default = NO_MEANING];
- optional string meaning_uri = 2;
-
- required string name = 3;
-
- required PropertyValue value = 5;
-
- required bool multiple = 4;
-
- optional bool searchable = 6 [default=false];
-
- enum FtsTokenizationOption {
- HTML = 1;
- ATOM = 2;
- }
-
- optional FtsTokenizationOption fts_tokenization_option = 8;
-
- optional string locale = 9 [default = "en"];
-}
-
-message Path {
- repeated group Element = 1 {
- required string type = 2;
- optional int64 id = 3;
- optional string name = 4;
- }
-}
-
-message Reference {
- required string app = 13;
- optional string name_space = 20;
- required Path path = 14;
-}
-
-message User {
- required string email = 1;
- required string auth_domain = 2;
- optional string nickname = 3;
- optional string federated_identity = 6;
- optional string federated_provider = 7;
-}
-
-message EntityProto {
- required Reference key = 13;
- required Path entity_group = 16;
- optional User owner = 17;
-
- enum Kind {
- GD_CONTACT = 1;
- GD_EVENT = 2;
- GD_MESSAGE = 3;
- }
- optional Kind kind = 4;
- optional string kind_uri = 5;
-
- repeated Property property = 14;
- repeated Property raw_property = 15;
-
- optional int32 rank = 18;
-}
-
-message CompositeProperty {
- required int64 index_id = 1;
- repeated string value = 2;
-}
-
-message Index {
- required string entity_type = 1;
- required bool ancestor = 5;
- repeated group Property = 2 {
- required string name = 3;
- enum Direction {
- ASCENDING = 1;
- DESCENDING = 2;
- }
- optional Direction direction = 4 [default = ASCENDING];
- }
-}
-
-message CompositeIndex {
- required string app_id = 1;
- required int64 id = 2;
- required Index definition = 3;
-
- enum State {
- WRITE_ONLY = 1;
- READ_WRITE = 2;
- DELETED = 3;
- ERROR = 4;
- }
- required State state = 4;
-
- optional bool only_use_if_required = 6 [default = false];
-}
-
-message IndexPostfix {
- message IndexValue {
- required string property_name = 1;
- required PropertyValue value = 2;
- }
-
- repeated IndexValue index_value = 1;
-
- optional Reference key = 2;
-
- optional bool before = 3 [default=true];
-}
-
-message IndexPosition {
- optional string key = 1;
-
- optional bool before = 2 [default=true];
-}
-
-message Snapshot {
- enum Status {
- INACTIVE = 0;
- ACTIVE = 1;
- }
-
- required int64 ts = 1;
-}
-
-message InternalHeader {
- optional string qos = 1;
-}
-
-message Transaction {
- optional InternalHeader header = 4;
- required fixed64 handle = 1;
- required string app = 2;
- optional bool mark_changes = 3 [default = false];
-}
-
-message Query {
- optional InternalHeader header = 39;
-
- required string app = 1;
- optional string name_space = 29;
-
- optional string kind = 3;
- optional Reference ancestor = 17;
-
- repeated group Filter = 4 {
- enum Operator {
- LESS_THAN = 1;
- LESS_THAN_OR_EQUAL = 2;
- GREATER_THAN = 3;
- GREATER_THAN_OR_EQUAL = 4;
- EQUAL = 5;
- IN = 6;
- EXISTS = 7;
- }
-
- required Operator op = 6;
- repeated Property property = 14;
- }
-
- optional string search_query = 8;
-
- repeated group Order = 9 {
- enum Direction {
- ASCENDING = 1;
- DESCENDING = 2;
- }
-
- required string property = 10;
- optional Direction direction = 11 [default = ASCENDING];
- }
-
- enum Hint {
- ORDER_FIRST = 1;
- ANCESTOR_FIRST = 2;
- FILTER_FIRST = 3;
- }
- optional Hint hint = 18;
-
- optional int32 count = 23;
-
- optional int32 offset = 12 [default = 0];
-
- optional int32 limit = 16;
-
- optional CompiledCursor compiled_cursor = 30;
- optional CompiledCursor end_compiled_cursor = 31;
-
- repeated CompositeIndex composite_index = 19;
-
- optional bool require_perfect_plan = 20 [default = false];
-
- optional bool keys_only = 21 [default = false];
-
- optional Transaction transaction = 22;
-
- optional bool compile = 25 [default = false];
-
- optional int64 failover_ms = 26;
-
- optional bool strong = 32;
-
- repeated string property_name = 33;
-
- repeated string group_by_property_name = 34;
-
- optional bool distinct = 24;
-
- optional int64 min_safe_time_seconds = 35;
-
- repeated string safe_replica_name = 36;
-
- optional bool persist_offset = 37 [default=false];
-}
-
-message CompiledQuery {
- required group PrimaryScan = 1 {
- optional string index_name = 2;
-
- optional string start_key = 3;
- optional bool start_inclusive = 4;
- optional string end_key = 5;
- optional bool end_inclusive = 6;
-
- repeated string start_postfix_value = 22;
- repeated string end_postfix_value = 23;
-
- optional int64 end_unapplied_log_timestamp_us = 19;
- }
-
- repeated group MergeJoinScan = 7 {
- required string index_name = 8;
-
- repeated string prefix_value = 9;
-
- optional bool value_prefix = 20 [default=false];
- }
-
- optional Index index_def = 21;
-
- optional int32 offset = 10 [default = 0];
-
- optional int32 limit = 11;
-
- required bool keys_only = 12;
-
- repeated string property_name = 24;
-
- optional int32 distinct_infix_size = 25;
-
- optional group EntityFilter = 13 {
- optional bool distinct = 14 [default=false];
-
- optional string kind = 17;
- optional Reference ancestor = 18;
- }
-}
-
-message CompiledCursor {
- optional group Position = 2 {
- optional string start_key = 27;
-
- repeated group IndexValue = 29 {
- optional string property = 30;
- required PropertyValue value = 31;
- }
-
- optional Reference key = 32;
-
- optional bool start_inclusive = 28 [default=true];
- }
-}
-
-message Cursor {
- required fixed64 cursor = 1;
-
- optional string app = 2;
-}
-
-message Error {
- enum ErrorCode {
- BAD_REQUEST = 1;
- CONCURRENT_TRANSACTION = 2;
- INTERNAL_ERROR = 3;
- NEED_INDEX = 4;
- TIMEOUT = 5;
- PERMISSION_DENIED = 6;
- BIGTABLE_ERROR = 7;
- COMMITTED_BUT_STILL_APPLYING = 8;
- CAPABILITY_DISABLED = 9;
- TRY_ALTERNATE_BACKEND = 10;
- SAFE_TIME_TOO_OLD = 11;
- }
-}
-
-message Cost {
- optional int32 index_writes = 1;
- optional int32 index_write_bytes = 2;
- optional int32 entity_writes = 3;
- optional int32 entity_write_bytes = 4;
- optional group CommitCost = 5 {
- optional int32 requested_entity_puts = 6;
- optional int32 requested_entity_deletes = 7;
- };
- optional int32 approximate_storage_delta = 8;
- optional int32 id_sequence_updates = 9;
-}
-
-message GetRequest {
- optional InternalHeader header = 6;
-
- repeated Reference key = 1;
- optional Transaction transaction = 2;
-
- optional int64 failover_ms = 3;
-
- optional bool strong = 4;
-
- optional bool allow_deferred = 5 [default=false];
-}
-
-message GetResponse {
- repeated group Entity = 1 {
- optional EntityProto entity = 2;
- optional Reference key = 4;
-
- optional int64 version = 3;
- }
-
- repeated Reference deferred = 5;
-
- optional bool in_order = 6 [default=true];
-}
-
-message PutRequest {
- optional InternalHeader header = 11;
-
- repeated EntityProto entity = 1;
- optional Transaction transaction = 2;
- repeated CompositeIndex composite_index = 3;
-
- optional bool trusted = 4 [default = false];
-
- optional bool force = 7 [default = false];
-
- optional bool mark_changes = 8 [default = false];
- repeated Snapshot snapshot = 9;
-
- enum AutoIdPolicy {
- CURRENT = 0;
- SEQUENTIAL = 1;
- }
- optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
-}
-
-message PutResponse {
- repeated Reference key = 1;
- optional Cost cost = 2;
- repeated int64 version = 3;
-}
-
-message TouchRequest {
- optional InternalHeader header = 10;
-
- repeated Reference key = 1;
- repeated CompositeIndex composite_index = 2;
- optional bool force = 3 [default = false];
- repeated Snapshot snapshot = 9;
-}
-
-message TouchResponse {
- optional Cost cost = 1;
-}
-
-message DeleteRequest {
- optional InternalHeader header = 10;
-
- repeated Reference key = 6;
- optional Transaction transaction = 5;
-
- optional bool trusted = 4 [default = false];
-
- optional bool force = 7 [default = false];
-
- optional bool mark_changes = 8 [default = false];
- repeated Snapshot snapshot = 9;
-}
-
-message DeleteResponse {
- optional Cost cost = 1;
- repeated int64 version = 3;
-}
-
-message NextRequest {
- optional InternalHeader header = 5;
-
- required Cursor cursor = 1;
- optional int32 count = 2;
-
- optional int32 offset = 4 [default = 0];
-
- optional bool compile = 3 [default = false];
-}
-
-message QueryResult {
- optional Cursor cursor = 1;
-
- repeated EntityProto result = 2;
-
- optional int32 skipped_results = 7;
-
- required bool more_results = 3;
-
- optional bool keys_only = 4;
-
- optional bool index_only = 9;
-
- optional bool small_ops = 10;
-
- optional CompiledQuery compiled_query = 5;
-
- optional CompiledCursor compiled_cursor = 6;
-
- repeated CompositeIndex index = 8;
-
- repeated int64 version = 11;
-}
-
-message AllocateIdsRequest {
- optional InternalHeader header = 4;
-
- optional Reference model_key = 1;
-
- optional int64 size = 2;
-
- optional int64 max = 3;
-
- repeated Reference reserve = 5;
-}
-
-message AllocateIdsResponse {
- required int64 start = 1;
- required int64 end = 2;
- optional Cost cost = 3;
-}
-
-message CompositeIndices {
- repeated CompositeIndex index = 1;
-}
-
-message AddActionsRequest {
- optional InternalHeader header = 3;
-
- required Transaction transaction = 1;
- repeated Action action = 2;
-}
-
-message AddActionsResponse {
-}
-
-message BeginTransactionRequest {
- optional InternalHeader header = 3;
-
- required string app = 1;
- optional bool allow_multiple_eg = 2 [default = false];
- optional string database_id = 4;
-
- enum TransactionMode {
- UNKNOWN = 0;
- READ_ONLY = 1;
- READ_WRITE = 2;
- }
- optional TransactionMode mode = 5 [default = UNKNOWN];
-
- optional Transaction previous_transaction = 7;
-}
-
-message CommitResponse {
- optional Cost cost = 1;
-
- repeated group Version = 3 {
- required Reference root_entity_key = 4;
- required int64 version = 5;
- }
-}
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
deleted file mode 100644
index 9b4134e42..000000000
--- a/vendor/google.golang.org/appengine/internal/identity.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-import (
- "os"
-
- netcontext "golang.org/x/net/context"
-)
-
-var (
- // This is set to true in identity_classic.go, which is behind the appengine build tag.
- // The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not
- // the second generation runtimes (>= Go 1.11), so this indicates whether we're on a
- // first-gen runtime. See IsStandard below for the second-gen check.
- appengineStandard bool
-
- // This is set to true in identity_flex.go, which is behind the appenginevm build tag.
- appengineFlex bool
-)
-
-// AppID is the implementation of the wrapper function of the same name in
-// ../identity.go. See that file for commentary.
-func AppID(c netcontext.Context) string {
- return appID(FullyQualifiedAppID(c))
-}
-
-// IsStandard is the implementation of the wrapper function of the same name in
-// ../appengine.go. See that file for commentary.
-func IsStandard() bool {
- // appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not
- // second-gen (>= Go 1.11).
- return appengineStandard || IsSecondGen()
-}
-
-// IsSecondGen is the implementation of the wrapper function of the same name in
-// ../appengine.go. See that file for commentary.
-func IsSecondGen() bool {
- // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime.
- return os.Getenv("GAE_ENV") == "standard"
-}
-
-// IsFlex is the implementation of the wrapper function of the same name in
-// ../appengine.go. See that file for commentary.
-func IsFlex() bool {
- return appengineFlex
-}
-
-// IsAppEngine is the implementation of the wrapper function of the same name in
-// ../appengine.go. See that file for commentary.
-func IsAppEngine() bool {
- return IsStandard() || IsFlex()
-}
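
The identity.go file deleted above separates first-generation runtimes (flagged by the appengine build tag via identity_classic.go) from second-generation ones, which it detects purely through the GAE_ENV environment variable. A standalone sketch of that environment check, outside the appengine packages:

    package main

    import (
    	"fmt"
    	"os"
    )

    // isSecondGen mirrors the deleted IsSecondGen: second-generation App Engine
    // standard runtimes (Go 1.11 and later) set GAE_ENV=standard.
    func isSecondGen() bool {
    	return os.Getenv("GAE_ENV") == "standard"
    }

    func main() {
    	if isSecondGen() {
    		fmt.Println("second-gen App Engine standard runtime")
    	} else {
    		fmt.Println("not a second-gen standard runtime")
    	}
    }
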
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
deleted file mode 100644
index 4e979f45e..000000000
--- a/vendor/google.golang.org/appengine/internal/identity_classic.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2015 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package internal
-
-import (
- "appengine"
-
- netcontext "golang.org/x/net/context"
-)
-
-func init() {
- appengineStandard = true
-}
-
-func DefaultVersionHostname(ctx netcontext.Context) string {
- c := fromContext(ctx)
- if c == nil {
- panic(errNotAppEngineContext)
- }
- return appengine.DefaultVersionHostname(c)
-}
-
-func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
-func ServerSoftware() string { return appengine.ServerSoftware() }
-func InstanceID() string { return appengine.InstanceID() }
-func IsDevAppServer() bool { return appengine.IsDevAppServer() }
-
-func RequestID(ctx netcontext.Context) string {
- c := fromContext(ctx)
- if c == nil {
- panic(errNotAppEngineContext)
- }
- return appengine.RequestID(c)
-}
-
-func ModuleName(ctx netcontext.Context) string {
- c := fromContext(ctx)
- if c == nil {
- panic(errNotAppEngineContext)
- }
- return appengine.ModuleName(c)
-}
-func VersionID(ctx netcontext.Context) string {
- c := fromContext(ctx)
- if c == nil {
- panic(errNotAppEngineContext)
- }
- return appengine.VersionID(c)
-}
-
-func fullyQualifiedAppID(ctx netcontext.Context) string {
- c := fromContext(ctx)
- if c == nil {
- panic(errNotAppEngineContext)
- }
- return c.FullyQualifiedAppID()
-}
diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go
deleted file mode 100644
index d5e2e7b5e..000000000
--- a/vendor/google.golang.org/appengine/internal/identity_flex.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright 2018 Google LLC. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build appenginevm
-
-package internal
-
-func init() {
- appengineFlex = true
-}
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
deleted file mode 100644
index 5d8067263..000000000
--- a/vendor/google.golang.org/appengine/internal/identity_vm.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package internal
-
-import (
- "log"
- "net/http"
- "os"
- "strings"
-
- netcontext "golang.org/x/net/context"
-)
-
-// These functions are implementations of the wrapper functions
-// in ../appengine/identity.go. See that file for commentary.
-
-const (
- hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
- hRequestLogId = "X-AppEngine-Request-Log-Id"
- hDatacenter = "X-AppEngine-Datacenter"
-)
-
-func ctxHeaders(ctx netcontext.Context) http.Header {
- c := fromContext(ctx)
- if c == nil {
- return nil
- }
- return c.Request().Header
-}
-
-func DefaultVersionHostname(ctx netcontext.Context) string {
- return ctxHeaders(ctx).Get(hDefaultVersionHostname)
-}
-
-func RequestID(ctx netcontext.Context) string {
- return ctxHeaders(ctx).Get(hRequestLogId)
-}
-
-func Datacenter(ctx netcontext.Context) string {
- if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" {
- return dc
- }
- // If the header isn't set, read zone from the metadata service.
- // It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE]
- zone, err := getMetadata("instance/zone")
- if err != nil {
- log.Printf("Datacenter: %v", err)
- return ""
- }
- parts := strings.Split(string(zone), "/")
- if len(parts) == 0 {
- return ""
- }
- return parts[len(parts)-1]
-}
-
-func ServerSoftware() string {
- // TODO(dsymonds): Remove fallback when we've verified this.
- if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
- return s
- }
- if s := os.Getenv("GAE_ENV"); s != "" {
- return s
- }
- return "Google App Engine/1.x.x"
-}
-
-// TODO(dsymonds): Remove the metadata fetches.
-
-func ModuleName(_ netcontext.Context) string {
- if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
- return s
- }
- if s := os.Getenv("GAE_SERVICE"); s != "" {
- return s
- }
- return string(mustGetMetadata("instance/attributes/gae_backend_name"))
-}
-
-func VersionID(_ netcontext.Context) string {
- if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
- return s1 + "." + s2
- }
- if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" {
- return s1 + "." + s2
- }
- return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
-}
-
-func InstanceID() string {
- if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
- return s
- }
- if s := os.Getenv("GAE_INSTANCE"); s != "" {
- return s
- }
- return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
-}
-
-func partitionlessAppID() string {
- // gae_project has everything except the partition prefix.
- if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" {
- return appID
- }
- if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" {
- return project
- }
- return string(mustGetMetadata("instance/attributes/gae_project"))
-}
-
-func fullyQualifiedAppID(_ netcontext.Context) string {
- if s := os.Getenv("GAE_APPLICATION"); s != "" {
- return s
- }
- appID := partitionlessAppID()
-
- part := os.Getenv("GAE_PARTITION")
- if part == "" {
- part = string(mustGetMetadata("instance/attributes/gae_partition"))
- }
-
- if part != "" {
- appID = part + "~" + appID
- }
- return appID
-}
-
-func IsDevAppServer() bool {
- return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
-}
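
Each helper in the identity_vm.go file deleted above resolves its value by consulting one or more environment variables and only then falling back to the instance metadata service. A small sketch of that lookup order; fetchFromMetadata is a hypothetical stand-in for the real metadata call:

    package main

    import (
    	"fmt"
    	"os"
    )

    // firstNonEmptyEnv returns the first listed environment variable that is set,
    // calling fallback only when none are — the same ordering ModuleName,
    // VersionID and InstanceID use above before touching instance metadata.
    func firstNonEmptyEnv(fallback func() string, keys ...string) string {
    	for _, k := range keys {
    		if v := os.Getenv(k); v != "" {
    			return v
    		}
    	}
    	return fallback()
    }

    func main() {
    	fetchFromMetadata := func() string { return "from-metadata" } // hypothetical fallback
    	module := firstNonEmptyEnv(fetchFromMetadata, "GAE_MODULE_NAME", "GAE_SERVICE")
    	fmt.Println("module:", module)
    }
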
diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go
deleted file mode 100644
index 051ea3980..000000000
--- a/vendor/google.golang.org/appengine/internal/internal.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// Package internal provides support for package appengine.
-//
-// Programs should not use this package directly. Its API is not stable.
-// Use packages appengine and appengine/* instead.
-package internal
-
-import (
- "fmt"
-
- "github.com/golang/protobuf/proto"
-
- remotepb "google.golang.org/appengine/internal/remote_api"
-)
-
-// errorCodeMaps is a map of service name to the error code map for the service.
-var errorCodeMaps = make(map[string]map[int32]string)
-
-// RegisterErrorCodeMap is called from API implementations to register their
-// error code map. This should only be called from init functions.
-func RegisterErrorCodeMap(service string, m map[int32]string) {
- errorCodeMaps[service] = m
-}
-
-type timeoutCodeKey struct {
- service string
- code int32
-}
-
-// timeoutCodes is the set of service+code pairs that represent timeouts.
-var timeoutCodes = make(map[timeoutCodeKey]bool)
-
-func RegisterTimeoutErrorCode(service string, code int32) {
- timeoutCodes[timeoutCodeKey{service, code}] = true
-}
-
-// APIError is the type returned by appengine.Context's Call method
-// when an API call fails in an API-specific way. This may be, for instance,
-// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
-type APIError struct {
- Service string
- Detail string
- Code int32 // API-specific error code
-}
-
-func (e *APIError) Error() string {
- if e.Code == 0 {
- if e.Detail == "" {
- return "APIError "
- }
- return e.Detail
- }
- s := fmt.Sprintf("API error %d", e.Code)
- if m, ok := errorCodeMaps[e.Service]; ok {
- s += " (" + e.Service + ": " + m[e.Code] + ")"
- } else {
- // Shouldn't happen, but provide a bit more detail if it does.
- s = e.Service + " " + s
- }
- if e.Detail != "" {
- s += ": " + e.Detail
- }
- return s
-}
-
-func (e *APIError) IsTimeout() bool {
- return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
-}
-
-// CallError is the type returned by appengine.Context's Call method when an
-// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
-type CallError struct {
- Detail string
- Code int32
- // TODO: Remove this if we get a distinguishable error code.
- Timeout bool
-}
-
-func (e *CallError) Error() string {
- var msg string
- switch remotepb.RpcError_ErrorCode(e.Code) {
- case remotepb.RpcError_UNKNOWN:
- return e.Detail
- case remotepb.RpcError_OVER_QUOTA:
- msg = "Over quota"
- case remotepb.RpcError_CAPABILITY_DISABLED:
- msg = "Capability disabled"
- case remotepb.RpcError_CANCELLED:
- msg = "Canceled"
- default:
- msg = fmt.Sprintf("Call error %d", e.Code)
- }
- s := msg + ": " + e.Detail
- if e.Timeout {
- s += " (timeout)"
- }
- return s
-}
-
-func (e *CallError) IsTimeout() bool {
- return e.Timeout
-}
-
-// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
-// The function should be prepared to be called on the same message more than once; it should only modify the
-// RPC request the first time.
-var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
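
Both error types in the internal.go file deleted above expose an IsTimeout method, which lets callers classify a failure without naming the concrete type. A hedged sketch of such a caller-side check, assuming only the method set shown in the diff; the fakeErr type exists purely to exercise it:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // timeouter matches the IsTimeout method that APIError and CallError both provide.
    type timeouter interface {
    	IsTimeout() bool
    }

    // isTimeout reports whether err, or any error it wraps, claims to be a timeout.
    func isTimeout(err error) bool {
    	var t timeouter
    	return errors.As(err, &t) && t.IsTimeout()
    }

    // fakeErr is a hypothetical error used only for the demonstration below.
    type fakeErr struct{ timeout bool }

    func (e *fakeErr) Error() string   { return "fake call error" }
    func (e *fakeErr) IsTimeout() bool { return e.timeout }

    func main() {
    	fmt.Println(isTimeout(&fakeErr{timeout: true})) // true
    	fmt.Println(isTimeout(errors.New("unrelated"))) // false
    }
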
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
deleted file mode 100644
index 8545ac4ad..000000000
--- a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
+++ /dev/null
@@ -1,1313 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/log/log_service.proto
-
-package log
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type LogServiceError_ErrorCode int32
-
-const (
- LogServiceError_OK LogServiceError_ErrorCode = 0
- LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
- LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2
-)
-
-var LogServiceError_ErrorCode_name = map[int32]string{
- 0: "OK",
- 1: "INVALID_REQUEST",
- 2: "STORAGE_ERROR",
-}
-var LogServiceError_ErrorCode_value = map[string]int32{
- "OK": 0,
- "INVALID_REQUEST": 1,
- "STORAGE_ERROR": 2,
-}
-
-func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
- p := new(LogServiceError_ErrorCode)
- *p = x
- return p
-}
-func (x LogServiceError_ErrorCode) String() string {
- return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
-}
-func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
- if err != nil {
- return err
- }
- *x = LogServiceError_ErrorCode(value)
- return nil
-}
-func (LogServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{0, 0}
-}
-
-type LogServiceError struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogServiceError) Reset() { *m = LogServiceError{} }
-func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
-func (*LogServiceError) ProtoMessage() {}
-func (*LogServiceError) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{0}
-}
-func (m *LogServiceError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogServiceError.Unmarshal(m, b)
-}
-func (m *LogServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogServiceError.Marshal(b, m, deterministic)
-}
-func (dst *LogServiceError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogServiceError.Merge(dst, src)
-}
-func (m *LogServiceError) XXX_Size() int {
- return xxx_messageInfo_LogServiceError.Size(m)
-}
-func (m *LogServiceError) XXX_DiscardUnknown() {
- xxx_messageInfo_LogServiceError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogServiceError proto.InternalMessageInfo
-
-type UserAppLogLine struct {
- TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec,json=timestampUsec" json:"timestamp_usec,omitempty"`
- Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
- Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} }
-func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
-func (*UserAppLogLine) ProtoMessage() {}
-func (*UserAppLogLine) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{1}
-}
-func (m *UserAppLogLine) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_UserAppLogLine.Unmarshal(m, b)
-}
-func (m *UserAppLogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_UserAppLogLine.Marshal(b, m, deterministic)
-}
-func (dst *UserAppLogLine) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UserAppLogLine.Merge(dst, src)
-}
-func (m *UserAppLogLine) XXX_Size() int {
- return xxx_messageInfo_UserAppLogLine.Size(m)
-}
-func (m *UserAppLogLine) XXX_DiscardUnknown() {
- xxx_messageInfo_UserAppLogLine.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UserAppLogLine proto.InternalMessageInfo
-
-func (m *UserAppLogLine) GetTimestampUsec() int64 {
- if m != nil && m.TimestampUsec != nil {
- return *m.TimestampUsec
- }
- return 0
-}
-
-func (m *UserAppLogLine) GetLevel() int64 {
- if m != nil && m.Level != nil {
- return *m.Level
- }
- return 0
-}
-
-func (m *UserAppLogLine) GetMessage() string {
- if m != nil && m.Message != nil {
- return *m.Message
- }
- return ""
-}
-
-type UserAppLogGroup struct {
- LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line,json=logLine" json:"log_line,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} }
-func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
-func (*UserAppLogGroup) ProtoMessage() {}
-func (*UserAppLogGroup) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{2}
-}
-func (m *UserAppLogGroup) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_UserAppLogGroup.Unmarshal(m, b)
-}
-func (m *UserAppLogGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_UserAppLogGroup.Marshal(b, m, deterministic)
-}
-func (dst *UserAppLogGroup) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UserAppLogGroup.Merge(dst, src)
-}
-func (m *UserAppLogGroup) XXX_Size() int {
- return xxx_messageInfo_UserAppLogGroup.Size(m)
-}
-func (m *UserAppLogGroup) XXX_DiscardUnknown() {
- xxx_messageInfo_UserAppLogGroup.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UserAppLogGroup proto.InternalMessageInfo
-
-func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
- if m != nil {
- return m.LogLine
- }
- return nil
-}
-
-type FlushRequest struct {
- Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FlushRequest) Reset() { *m = FlushRequest{} }
-func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
-func (*FlushRequest) ProtoMessage() {}
-func (*FlushRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{3}
-}
-func (m *FlushRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FlushRequest.Unmarshal(m, b)
-}
-func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic)
-}
-func (dst *FlushRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FlushRequest.Merge(dst, src)
-}
-func (m *FlushRequest) XXX_Size() int {
- return xxx_messageInfo_FlushRequest.Size(m)
-}
-func (m *FlushRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_FlushRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FlushRequest proto.InternalMessageInfo
-
-func (m *FlushRequest) GetLogs() []byte {
- if m != nil {
- return m.Logs
- }
- return nil
-}
-
-type SetStatusRequest struct {
- Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} }
-func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
-func (*SetStatusRequest) ProtoMessage() {}
-func (*SetStatusRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{4}
-}
-func (m *SetStatusRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_SetStatusRequest.Unmarshal(m, b)
-}
-func (m *SetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_SetStatusRequest.Marshal(b, m, deterministic)
-}
-func (dst *SetStatusRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SetStatusRequest.Merge(dst, src)
-}
-func (m *SetStatusRequest) XXX_Size() int {
- return xxx_messageInfo_SetStatusRequest.Size(m)
-}
-func (m *SetStatusRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_SetStatusRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SetStatusRequest proto.InternalMessageInfo
-
-func (m *SetStatusRequest) GetStatus() string {
- if m != nil && m.Status != nil {
- return *m.Status
- }
- return ""
-}
-
-type LogOffset struct {
- RequestId []byte `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogOffset) Reset() { *m = LogOffset{} }
-func (m *LogOffset) String() string { return proto.CompactTextString(m) }
-func (*LogOffset) ProtoMessage() {}
-func (*LogOffset) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{5}
-}
-func (m *LogOffset) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogOffset.Unmarshal(m, b)
-}
-func (m *LogOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogOffset.Marshal(b, m, deterministic)
-}
-func (dst *LogOffset) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogOffset.Merge(dst, src)
-}
-func (m *LogOffset) XXX_Size() int {
- return xxx_messageInfo_LogOffset.Size(m)
-}
-func (m *LogOffset) XXX_DiscardUnknown() {
- xxx_messageInfo_LogOffset.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogOffset proto.InternalMessageInfo
-
-func (m *LogOffset) GetRequestId() []byte {
- if m != nil {
- return m.RequestId
- }
- return nil
-}
-
-type LogLine struct {
- Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
- Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
- LogMessage *string `protobuf:"bytes,3,req,name=log_message,json=logMessage" json:"log_message,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogLine) Reset() { *m = LogLine{} }
-func (m *LogLine) String() string { return proto.CompactTextString(m) }
-func (*LogLine) ProtoMessage() {}
-func (*LogLine) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{6}
-}
-func (m *LogLine) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogLine.Unmarshal(m, b)
-}
-func (m *LogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogLine.Marshal(b, m, deterministic)
-}
-func (dst *LogLine) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogLine.Merge(dst, src)
-}
-func (m *LogLine) XXX_Size() int {
- return xxx_messageInfo_LogLine.Size(m)
-}
-func (m *LogLine) XXX_DiscardUnknown() {
- xxx_messageInfo_LogLine.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogLine proto.InternalMessageInfo
-
-func (m *LogLine) GetTime() int64 {
- if m != nil && m.Time != nil {
- return *m.Time
- }
- return 0
-}
-
-func (m *LogLine) GetLevel() int32 {
- if m != nil && m.Level != nil {
- return *m.Level
- }
- return 0
-}
-
-func (m *LogLine) GetLogMessage() string {
- if m != nil && m.LogMessage != nil {
- return *m.LogMessage
- }
- return ""
-}
-
-type RequestLog struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
- ModuleId *string `protobuf:"bytes,37,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
- VersionId *string `protobuf:"bytes,2,req,name=version_id,json=versionId" json:"version_id,omitempty"`
- RequestId []byte `protobuf:"bytes,3,req,name=request_id,json=requestId" json:"request_id,omitempty"`
- Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
- Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
- Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
- StartTime *int64 `protobuf:"varint,6,req,name=start_time,json=startTime" json:"start_time,omitempty"`
- EndTime *int64 `protobuf:"varint,7,req,name=end_time,json=endTime" json:"end_time,omitempty"`
- Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
- Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
- Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
- Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
- HttpVersion *string `protobuf:"bytes,12,req,name=http_version,json=httpVersion" json:"http_version,omitempty"`
- Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
- ResponseSize *int64 `protobuf:"varint,14,req,name=response_size,json=responseSize" json:"response_size,omitempty"`
- Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
- UserAgent *string `protobuf:"bytes,16,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"`
- UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry,json=urlMapEntry" json:"url_map_entry,omitempty"`
- Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
- ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles,json=apiMcycles" json:"api_mcycles,omitempty"`
- Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
- Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
- TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name,json=taskQueueName" json:"task_queue_name,omitempty"`
- TaskName *string `protobuf:"bytes,23,opt,name=task_name,json=taskName" json:"task_name,omitempty"`
- WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request,json=wasLoadingRequest" json:"was_loading_request,omitempty"`
- PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time,json=pendingTime" json:"pending_time,omitempty"`
- ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,json=replicaIndex,def=-1" json:"replica_index,omitempty"`
- Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
- CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key,json=cloneKey" json:"clone_key,omitempty"`
- Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
- LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete,json=linesIncomplete" json:"lines_incomplete,omitempty"`
- AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release,json=appEngineRelease" json:"app_engine_release,omitempty"`
- ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason,json=exitReason" json:"exit_reason,omitempty"`
- WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time,json=wasThrottledForTime" json:"was_throttled_for_time,omitempty"`
- WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests,json=wasThrottledForRequests" json:"was_throttled_for_requests,omitempty"`
- ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"`
- ServerName []byte `protobuf:"bytes,34,opt,name=server_name,json=serverName" json:"server_name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RequestLog) Reset() { *m = RequestLog{} }
-func (m *RequestLog) String() string { return proto.CompactTextString(m) }
-func (*RequestLog) ProtoMessage() {}
-func (*RequestLog) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{7}
-}
-func (m *RequestLog) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_RequestLog.Unmarshal(m, b)
-}
-func (m *RequestLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_RequestLog.Marshal(b, m, deterministic)
-}
-func (dst *RequestLog) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RequestLog.Merge(dst, src)
-}
-func (m *RequestLog) XXX_Size() int {
- return xxx_messageInfo_RequestLog.Size(m)
-}
-func (m *RequestLog) XXX_DiscardUnknown() {
- xxx_messageInfo_RequestLog.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RequestLog proto.InternalMessageInfo
-
-const Default_RequestLog_ModuleId string = "default"
-const Default_RequestLog_ReplicaIndex int32 = -1
-const Default_RequestLog_Finished bool = true
-
-func (m *RequestLog) GetAppId() string {
- if m != nil && m.AppId != nil {
- return *m.AppId
- }
- return ""
-}
-
-func (m *RequestLog) GetModuleId() string {
- if m != nil && m.ModuleId != nil {
- return *m.ModuleId
- }
- return Default_RequestLog_ModuleId
-}
-
-func (m *RequestLog) GetVersionId() string {
- if m != nil && m.VersionId != nil {
- return *m.VersionId
- }
- return ""
-}
-
-func (m *RequestLog) GetRequestId() []byte {
- if m != nil {
- return m.RequestId
- }
- return nil
-}
-
-func (m *RequestLog) GetOffset() *LogOffset {
- if m != nil {
- return m.Offset
- }
- return nil
-}
-
-func (m *RequestLog) GetIp() string {
- if m != nil && m.Ip != nil {
- return *m.Ip
- }
- return ""
-}
-
-func (m *RequestLog) GetNickname() string {
- if m != nil && m.Nickname != nil {
- return *m.Nickname
- }
- return ""
-}
-
-func (m *RequestLog) GetStartTime() int64 {
- if m != nil && m.StartTime != nil {
- return *m.StartTime
- }
- return 0
-}
-
-func (m *RequestLog) GetEndTime() int64 {
- if m != nil && m.EndTime != nil {
- return *m.EndTime
- }
- return 0
-}
-
-func (m *RequestLog) GetLatency() int64 {
- if m != nil && m.Latency != nil {
- return *m.Latency
- }
- return 0
-}
-
-func (m *RequestLog) GetMcycles() int64 {
- if m != nil && m.Mcycles != nil {
- return *m.Mcycles
- }
- return 0
-}
-
-func (m *RequestLog) GetMethod() string {
- if m != nil && m.Method != nil {
- return *m.Method
- }
- return ""
-}
-
-func (m *RequestLog) GetResource() string {
- if m != nil && m.Resource != nil {
- return *m.Resource
- }
- return ""
-}
-
-func (m *RequestLog) GetHttpVersion() string {
- if m != nil && m.HttpVersion != nil {
- return *m.HttpVersion
- }
- return ""
-}
-
-func (m *RequestLog) GetStatus() int32 {
- if m != nil && m.Status != nil {
- return *m.Status
- }
- return 0
-}
-
-func (m *RequestLog) GetResponseSize() int64 {
- if m != nil && m.ResponseSize != nil {
- return *m.ResponseSize
- }
- return 0
-}
-
-func (m *RequestLog) GetReferrer() string {
- if m != nil && m.Referrer != nil {
- return *m.Referrer
- }
- return ""
-}
-
-func (m *RequestLog) GetUserAgent() string {
- if m != nil && m.UserAgent != nil {
- return *m.UserAgent
- }
- return ""
-}
-
-func (m *RequestLog) GetUrlMapEntry() string {
- if m != nil && m.UrlMapEntry != nil {
- return *m.UrlMapEntry
- }
- return ""
-}
-
-func (m *RequestLog) GetCombined() string {
- if m != nil && m.Combined != nil {
- return *m.Combined
- }
- return ""
-}
-
-func (m *RequestLog) GetApiMcycles() int64 {
- if m != nil && m.ApiMcycles != nil {
- return *m.ApiMcycles
- }
- return 0
-}
-
-func (m *RequestLog) GetHost() string {
- if m != nil && m.Host != nil {
- return *m.Host
- }
- return ""
-}
-
-func (m *RequestLog) GetCost() float64 {
- if m != nil && m.Cost != nil {
- return *m.Cost
- }
- return 0
-}
-
-func (m *RequestLog) GetTaskQueueName() string {
- if m != nil && m.TaskQueueName != nil {
- return *m.TaskQueueName
- }
- return ""
-}
-
-func (m *RequestLog) GetTaskName() string {
- if m != nil && m.TaskName != nil {
- return *m.TaskName
- }
- return ""
-}
-
-func (m *RequestLog) GetWasLoadingRequest() bool {
- if m != nil && m.WasLoadingRequest != nil {
- return *m.WasLoadingRequest
- }
- return false
-}
-
-func (m *RequestLog) GetPendingTime() int64 {
- if m != nil && m.PendingTime != nil {
- return *m.PendingTime
- }
- return 0
-}
-
-func (m *RequestLog) GetReplicaIndex() int32 {
- if m != nil && m.ReplicaIndex != nil {
- return *m.ReplicaIndex
- }
- return Default_RequestLog_ReplicaIndex
-}
-
-func (m *RequestLog) GetFinished() bool {
- if m != nil && m.Finished != nil {
- return *m.Finished
- }
- return Default_RequestLog_Finished
-}
-
-func (m *RequestLog) GetCloneKey() []byte {
- if m != nil {
- return m.CloneKey
- }
- return nil
-}
-
-func (m *RequestLog) GetLine() []*LogLine {
- if m != nil {
- return m.Line
- }
- return nil
-}
-
-func (m *RequestLog) GetLinesIncomplete() bool {
- if m != nil && m.LinesIncomplete != nil {
- return *m.LinesIncomplete
- }
- return false
-}
-
-func (m *RequestLog) GetAppEngineRelease() []byte {
- if m != nil {
- return m.AppEngineRelease
- }
- return nil
-}
-
-func (m *RequestLog) GetExitReason() int32 {
- if m != nil && m.ExitReason != nil {
- return *m.ExitReason
- }
- return 0
-}
-
-func (m *RequestLog) GetWasThrottledForTime() bool {
- if m != nil && m.WasThrottledForTime != nil {
- return *m.WasThrottledForTime
- }
- return false
-}
-
-func (m *RequestLog) GetWasThrottledForRequests() bool {
- if m != nil && m.WasThrottledForRequests != nil {
- return *m.WasThrottledForRequests
- }
- return false
-}
-
-func (m *RequestLog) GetThrottledTime() int64 {
- if m != nil && m.ThrottledTime != nil {
- return *m.ThrottledTime
- }
- return 0
-}
-
-func (m *RequestLog) GetServerName() []byte {
- if m != nil {
- return m.ServerName
- }
- return nil
-}
-
-type LogModuleVersion struct {
- ModuleId *string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"`
- VersionId *string `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} }
-func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
-func (*LogModuleVersion) ProtoMessage() {}
-func (*LogModuleVersion) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{8}
-}
-func (m *LogModuleVersion) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogModuleVersion.Unmarshal(m, b)
-}
-func (m *LogModuleVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogModuleVersion.Marshal(b, m, deterministic)
-}
-func (dst *LogModuleVersion) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogModuleVersion.Merge(dst, src)
-}
-func (m *LogModuleVersion) XXX_Size() int {
- return xxx_messageInfo_LogModuleVersion.Size(m)
-}
-func (m *LogModuleVersion) XXX_DiscardUnknown() {
- xxx_messageInfo_LogModuleVersion.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogModuleVersion proto.InternalMessageInfo
-
-const Default_LogModuleVersion_ModuleId string = "default"
-
-func (m *LogModuleVersion) GetModuleId() string {
- if m != nil && m.ModuleId != nil {
- return *m.ModuleId
- }
- return Default_LogModuleVersion_ModuleId
-}
-
-func (m *LogModuleVersion) GetVersionId() string {
- if m != nil && m.VersionId != nil {
- return *m.VersionId
- }
- return ""
-}
-
-type LogReadRequest struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
- VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
- ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version,json=moduleVersion" json:"module_version,omitempty"`
- StartTime *int64 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
- EndTime *int64 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
- Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
- RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id,json=requestId" json:"request_id,omitempty"`
- MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level,json=minimumLogLevel" json:"minimum_log_level,omitempty"`
- IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete,json=includeIncomplete" json:"include_incomplete,omitempty"`
- Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
- CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex,json=combinedLogRegex" json:"combined_log_regex,omitempty"`
- HostRegex *string `protobuf:"bytes,15,opt,name=host_regex,json=hostRegex" json:"host_regex,omitempty"`
- ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index,json=replicaIndex" json:"replica_index,omitempty"`
- IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs,json=includeAppLogs" json:"include_app_logs,omitempty"`
- AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request,json=appLogsPerRequest" json:"app_logs_per_request,omitempty"`
- IncludeHost *bool `protobuf:"varint,11,opt,name=include_host,json=includeHost" json:"include_host,omitempty"`
- IncludeAll *bool `protobuf:"varint,12,opt,name=include_all,json=includeAll" json:"include_all,omitempty"`
- CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator,json=cacheIterator" json:"cache_iterator,omitempty"`
- NumShards *int32 `protobuf:"varint,18,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogReadRequest) Reset() { *m = LogReadRequest{} }
-func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
-func (*LogReadRequest) ProtoMessage() {}
-func (*LogReadRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{9}
-}
-func (m *LogReadRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogReadRequest.Unmarshal(m, b)
-}
-func (m *LogReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogReadRequest.Marshal(b, m, deterministic)
-}
-func (dst *LogReadRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogReadRequest.Merge(dst, src)
-}
-func (m *LogReadRequest) XXX_Size() int {
- return xxx_messageInfo_LogReadRequest.Size(m)
-}
-func (m *LogReadRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LogReadRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogReadRequest proto.InternalMessageInfo
-
-func (m *LogReadRequest) GetAppId() string {
- if m != nil && m.AppId != nil {
- return *m.AppId
- }
- return ""
-}
-
-func (m *LogReadRequest) GetVersionId() []string {
- if m != nil {
- return m.VersionId
- }
- return nil
-}
-
-func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
- if m != nil {
- return m.ModuleVersion
- }
- return nil
-}
-
-func (m *LogReadRequest) GetStartTime() int64 {
- if m != nil && m.StartTime != nil {
- return *m.StartTime
- }
- return 0
-}
-
-func (m *LogReadRequest) GetEndTime() int64 {
- if m != nil && m.EndTime != nil {
- return *m.EndTime
- }
- return 0
-}
-
-func (m *LogReadRequest) GetOffset() *LogOffset {
- if m != nil {
- return m.Offset
- }
- return nil
-}
-
-func (m *LogReadRequest) GetRequestId() [][]byte {
- if m != nil {
- return m.RequestId
- }
- return nil
-}
-
-func (m *LogReadRequest) GetMinimumLogLevel() int32 {
- if m != nil && m.MinimumLogLevel != nil {
- return *m.MinimumLogLevel
- }
- return 0
-}
-
-func (m *LogReadRequest) GetIncludeIncomplete() bool {
- if m != nil && m.IncludeIncomplete != nil {
- return *m.IncludeIncomplete
- }
- return false
-}
-
-func (m *LogReadRequest) GetCount() int64 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *LogReadRequest) GetCombinedLogRegex() string {
- if m != nil && m.CombinedLogRegex != nil {
- return *m.CombinedLogRegex
- }
- return ""
-}
-
-func (m *LogReadRequest) GetHostRegex() string {
- if m != nil && m.HostRegex != nil {
- return *m.HostRegex
- }
- return ""
-}
-
-func (m *LogReadRequest) GetReplicaIndex() int32 {
- if m != nil && m.ReplicaIndex != nil {
- return *m.ReplicaIndex
- }
- return 0
-}
-
-func (m *LogReadRequest) GetIncludeAppLogs() bool {
- if m != nil && m.IncludeAppLogs != nil {
- return *m.IncludeAppLogs
- }
- return false
-}
-
-func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
- if m != nil && m.AppLogsPerRequest != nil {
- return *m.AppLogsPerRequest
- }
- return 0
-}
-
-func (m *LogReadRequest) GetIncludeHost() bool {
- if m != nil && m.IncludeHost != nil {
- return *m.IncludeHost
- }
- return false
-}
-
-func (m *LogReadRequest) GetIncludeAll() bool {
- if m != nil && m.IncludeAll != nil {
- return *m.IncludeAll
- }
- return false
-}
-
-func (m *LogReadRequest) GetCacheIterator() bool {
- if m != nil && m.CacheIterator != nil {
- return *m.CacheIterator
- }
- return false
-}
-
-func (m *LogReadRequest) GetNumShards() int32 {
- if m != nil && m.NumShards != nil {
- return *m.NumShards
- }
- return 0
-}
-
-type LogReadResponse struct {
- Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
- Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
- LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time,json=lastEndTime" json:"last_end_time,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogReadResponse) Reset() { *m = LogReadResponse{} }
-func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
-func (*LogReadResponse) ProtoMessage() {}
-func (*LogReadResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{10}
-}
-func (m *LogReadResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogReadResponse.Unmarshal(m, b)
-}
-func (m *LogReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogReadResponse.Marshal(b, m, deterministic)
-}
-func (dst *LogReadResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogReadResponse.Merge(dst, src)
-}
-func (m *LogReadResponse) XXX_Size() int {
- return xxx_messageInfo_LogReadResponse.Size(m)
-}
-func (m *LogReadResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LogReadResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogReadResponse proto.InternalMessageInfo
-
-func (m *LogReadResponse) GetLog() []*RequestLog {
- if m != nil {
- return m.Log
- }
- return nil
-}
-
-func (m *LogReadResponse) GetOffset() *LogOffset {
- if m != nil {
- return m.Offset
- }
- return nil
-}
-
-func (m *LogReadResponse) GetLastEndTime() int64 {
- if m != nil && m.LastEndTime != nil {
- return *m.LastEndTime
- }
- return 0
-}
-
-type LogUsageRecord struct {
- VersionId *string `protobuf:"bytes,1,opt,name=version_id,json=versionId" json:"version_id,omitempty"`
- StartTime *int32 `protobuf:"varint,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
- EndTime *int32 `protobuf:"varint,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
- Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
- TotalSize *int64 `protobuf:"varint,5,opt,name=total_size,json=totalSize" json:"total_size,omitempty"`
- Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} }
-func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
-func (*LogUsageRecord) ProtoMessage() {}
-func (*LogUsageRecord) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{11}
-}
-func (m *LogUsageRecord) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogUsageRecord.Unmarshal(m, b)
-}
-func (m *LogUsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogUsageRecord.Marshal(b, m, deterministic)
-}
-func (dst *LogUsageRecord) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogUsageRecord.Merge(dst, src)
-}
-func (m *LogUsageRecord) XXX_Size() int {
- return xxx_messageInfo_LogUsageRecord.Size(m)
-}
-func (m *LogUsageRecord) XXX_DiscardUnknown() {
- xxx_messageInfo_LogUsageRecord.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogUsageRecord proto.InternalMessageInfo
-
-func (m *LogUsageRecord) GetVersionId() string {
- if m != nil && m.VersionId != nil {
- return *m.VersionId
- }
- return ""
-}
-
-func (m *LogUsageRecord) GetStartTime() int32 {
- if m != nil && m.StartTime != nil {
- return *m.StartTime
- }
- return 0
-}
-
-func (m *LogUsageRecord) GetEndTime() int32 {
- if m != nil && m.EndTime != nil {
- return *m.EndTime
- }
- return 0
-}
-
-func (m *LogUsageRecord) GetCount() int64 {
- if m != nil && m.Count != nil {
- return *m.Count
- }
- return 0
-}
-
-func (m *LogUsageRecord) GetTotalSize() int64 {
- if m != nil && m.TotalSize != nil {
- return *m.TotalSize
- }
- return 0
-}
-
-func (m *LogUsageRecord) GetRecords() int32 {
- if m != nil && m.Records != nil {
- return *m.Records
- }
- return 0
-}
-
-type LogUsageRequest struct {
- AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"`
- VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"`
- StartTime *int32 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"`
- EndTime *int32 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"`
- ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,json=resolutionHours,def=1" json:"resolution_hours,omitempty"`
- CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions,json=combineVersions" json:"combine_versions,omitempty"`
- UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version,json=usageVersion" json:"usage_version,omitempty"`
- VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only,json=versionsOnly" json:"versions_only,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} }
-func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
-func (*LogUsageRequest) ProtoMessage() {}
-func (*LogUsageRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{12}
-}
-func (m *LogUsageRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogUsageRequest.Unmarshal(m, b)
-}
-func (m *LogUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogUsageRequest.Marshal(b, m, deterministic)
-}
-func (dst *LogUsageRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogUsageRequest.Merge(dst, src)
-}
-func (m *LogUsageRequest) XXX_Size() int {
- return xxx_messageInfo_LogUsageRequest.Size(m)
-}
-func (m *LogUsageRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LogUsageRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogUsageRequest proto.InternalMessageInfo
-
-const Default_LogUsageRequest_ResolutionHours uint32 = 1
-
-func (m *LogUsageRequest) GetAppId() string {
- if m != nil && m.AppId != nil {
- return *m.AppId
- }
- return ""
-}
-
-func (m *LogUsageRequest) GetVersionId() []string {
- if m != nil {
- return m.VersionId
- }
- return nil
-}
-
-func (m *LogUsageRequest) GetStartTime() int32 {
- if m != nil && m.StartTime != nil {
- return *m.StartTime
- }
- return 0
-}
-
-func (m *LogUsageRequest) GetEndTime() int32 {
- if m != nil && m.EndTime != nil {
- return *m.EndTime
- }
- return 0
-}
-
-func (m *LogUsageRequest) GetResolutionHours() uint32 {
- if m != nil && m.ResolutionHours != nil {
- return *m.ResolutionHours
- }
- return Default_LogUsageRequest_ResolutionHours
-}
-
-func (m *LogUsageRequest) GetCombineVersions() bool {
- if m != nil && m.CombineVersions != nil {
- return *m.CombineVersions
- }
- return false
-}
-
-func (m *LogUsageRequest) GetUsageVersion() int32 {
- if m != nil && m.UsageVersion != nil {
- return *m.UsageVersion
- }
- return 0
-}
-
-func (m *LogUsageRequest) GetVersionsOnly() bool {
- if m != nil && m.VersionsOnly != nil {
- return *m.VersionsOnly
- }
- return false
-}
-
-type LogUsageResponse struct {
- Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
- Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} }
-func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
-func (*LogUsageResponse) ProtoMessage() {}
-func (*LogUsageResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_log_service_f054fd4b5012319d, []int{13}
-}
-func (m *LogUsageResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LogUsageResponse.Unmarshal(m, b)
-}
-func (m *LogUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LogUsageResponse.Marshal(b, m, deterministic)
-}
-func (dst *LogUsageResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LogUsageResponse.Merge(dst, src)
-}
-func (m *LogUsageResponse) XXX_Size() int {
- return xxx_messageInfo_LogUsageResponse.Size(m)
-}
-func (m *LogUsageResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LogUsageResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LogUsageResponse proto.InternalMessageInfo
-
-func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
- if m != nil {
- return m.Usage
- }
- return nil
-}
-
-func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
- if m != nil {
- return m.Summary
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*LogServiceError)(nil), "appengine.LogServiceError")
- proto.RegisterType((*UserAppLogLine)(nil), "appengine.UserAppLogLine")
- proto.RegisterType((*UserAppLogGroup)(nil), "appengine.UserAppLogGroup")
- proto.RegisterType((*FlushRequest)(nil), "appengine.FlushRequest")
- proto.RegisterType((*SetStatusRequest)(nil), "appengine.SetStatusRequest")
- proto.RegisterType((*LogOffset)(nil), "appengine.LogOffset")
- proto.RegisterType((*LogLine)(nil), "appengine.LogLine")
- proto.RegisterType((*RequestLog)(nil), "appengine.RequestLog")
- proto.RegisterType((*LogModuleVersion)(nil), "appengine.LogModuleVersion")
- proto.RegisterType((*LogReadRequest)(nil), "appengine.LogReadRequest")
- proto.RegisterType((*LogReadResponse)(nil), "appengine.LogReadResponse")
- proto.RegisterType((*LogUsageRecord)(nil), "appengine.LogUsageRecord")
- proto.RegisterType((*LogUsageRequest)(nil), "appengine.LogUsageRequest")
- proto.RegisterType((*LogUsageResponse)(nil), "appengine.LogUsageResponse")
-}
-
-func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/log/log_service.proto", fileDescriptor_log_service_f054fd4b5012319d)
-}
-
-var fileDescriptor_log_service_f054fd4b5012319d = []byte{
- // 1553 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0xc6,
- 0x15, 0x2e, 0x48, 0x51, 0x24, 0x0f, 0x49, 0x91, 0x5a, 0xcb, 0xce, 0xda, 0xae, 0x6b, 0x1a, 0x4e,
- 0x1c, 0xd6, 0x93, 0x48, 0x93, 0xa4, 0x57, 0xca, 0x95, 0xd3, 0x2a, 0x8e, 0x26, 0xb4, 0xd5, 0x40,
- 0x72, 0x3a, 0xd3, 0x1b, 0x0c, 0x0a, 0x1c, 0x81, 0x18, 0x2f, 0xb1, 0xc8, 0xee, 0xc2, 0x91, 0x72,
- 0xdb, 0xdb, 0x3e, 0x46, 0x1f, 0xa2, 0xaf, 0xd2, 0xb7, 0xe9, 0xec, 0xd9, 0x05, 0x44, 0x2a, 0x4d,
- 0xc6, 0x33, 0xb9, 0xe0, 0x10, 0xfb, 0x9d, 0x83, 0xdd, 0xf3, 0xf3, 0x9d, 0x6f, 0x01, 0xc7, 0xb9,
- 0x94, 0xb9, 0xc0, 0xc3, 0x5c, 0x8a, 0xa4, 0xcc, 0x0f, 0xa5, 0xca, 0x8f, 0x92, 0xaa, 0xc2, 0x32,
- 0x2f, 0x4a, 0x3c, 0x2a, 0x4a, 0x83, 0xaa, 0x4c, 0xc4, 0x91, 0x90, 0xb9, 0xfd, 0xc5, 0x1a, 0xd5,
- 0xbb, 0x22, 0xc5, 0xc3, 0x4a, 0x49, 0x23, 0xd9, 0xb0, 0xf5, 0x0c, 0x5f, 0xc3, 0x74, 0x29, 0xf3,
- 0x73, 0x67, 0x3e, 0x51, 0x4a, 0xaa, 0xf0, 0x4b, 0x18, 0xd2, 0xc3, 0x9f, 0x65, 0x86, 0x6c, 0x17,
- 0x3a, 0x67, 0xdf, 0xce, 0x7e, 0xc7, 0xee, 0xc0, 0xf4, 0xf4, 0xf5, 0xf7, 0x2f, 0x96, 0xa7, 0x7f,
- 0x89, 0xa3, 0x93, 0xef, 0xde, 0x9c, 0x9c, 0x5f, 0xcc, 0x02, 0xb6, 0x0f, 0x93, 0xf3, 0x8b, 0xb3,
- 0xe8, 0xc5, 0xcb, 0x93, 0xf8, 0x24, 0x8a, 0xce, 0xa2, 0x59, 0x27, 0xcc, 0x61, 0xef, 0x8d, 0x46,
- 0xf5, 0xa2, 0xaa, 0x96, 0x32, 0x5f, 0x16, 0x25, 0xb2, 0x8f, 0x60, 0xcf, 0x14, 0x6b, 0xd4, 0x26,
- 0x59, 0x57, 0x71, 0xad, 0x31, 0xe5, 0xc1, 0xbc, 0xb3, 0xe8, 0x46, 0x93, 0x16, 0x7d, 0xa3, 0x31,
- 0x65, 0x07, 0xd0, 0x13, 0xf8, 0x0e, 0x05, 0xef, 0x90, 0xd5, 0x2d, 0x18, 0x87, 0xfe, 0x1a, 0xb5,
- 0x4e, 0x72, 0xe4, 0xdd, 0x79, 0x67, 0x31, 0x8c, 0x9a, 0x65, 0xf8, 0x12, 0xa6, 0x37, 0x07, 0xbd,
- 0x54, 0xb2, 0xae, 0xd8, 0x9f, 0x60, 0x60, 0x73, 0x15, 0x45, 0x89, 0xbc, 0x33, 0xef, 0x2e, 0x46,
- 0x9f, 0xdf, 0x3f, 0x6c, 0x33, 0x3d, 0xdc, 0x0e, 0x2b, 0xea, 0x0b, 0xf7, 0x10, 0x86, 0x30, 0xfe,
- 0x5a, 0xd4, 0x7a, 0x15, 0xe1, 0x0f, 0x35, 0x6a, 0xc3, 0x18, 0xec, 0x08, 0x99, 0x6b, 0x1e, 0xcc,
- 0x83, 0xc5, 0x38, 0xa2, 0xe7, 0xf0, 0x39, 0xcc, 0xce, 0xd1, 0x9c, 0x9b, 0xc4, 0xd4, 0xba, 0xf1,
- 0xbb, 0x07, 0xbb, 0x9a, 0x00, 0xca, 0x67, 0x18, 0xf9, 0x55, 0xf8, 0x1c, 0x86, 0x4b, 0x99, 0x9f,
- 0x5d, 0x5e, 0x6a, 0x34, 0xec, 0x11, 0x80, 0x72, 0xfe, 0x71, 0x91, 0xf9, 0x2d, 0x87, 0x1e, 0x39,
- 0xcd, 0xc2, 0x0b, 0xe8, 0x37, 0x65, 0x62, 0xb0, 0x63, 0x0b, 0xe2, 0x8b, 0x43, 0xcf, 0xdb, 0x35,
- 0xe9, 0x35, 0x35, 0x79, 0x0c, 0x23, 0x9b, 0xe6, 0x76, 0x5d, 0x40, 0xc8, 0xfc, 0x95, 0x2f, 0xcd,
- 0x3f, 0x01, 0xc0, 0x47, 0xb9, 0x94, 0x39, 0xbb, 0x0b, 0xbb, 0x49, 0x55, 0xb9, 0xf3, 0xad, 0x6b,
- 0x2f, 0xa9, 0xaa, 0xd3, 0x8c, 0x7d, 0x08, 0xc3, 0xb5, 0xcc, 0x6a, 0x81, 0xd6, 0xf2, 0xd1, 0x3c,
- 0x58, 0x0c, 0x8f, 0xfb, 0x19, 0x5e, 0x26, 0xb5, 0x30, 0xd1, 0xc0, 0x59, 0x4e, 0x33, 0x9b, 0xc0,
- 0x3b, 0x54, 0xba, 0x90, 0xa5, 0x75, 0xeb, 0xd0, 0x06, 0x43, 0x8f, 0x38, 0xf3, 0x46, 0x7e, 0x36,
- 0x94, 0xcd, 0xfc, 0xd8, 0x27, 0xb0, 0x2b, 0xa9, 0x10, 0xfc, 0xe9, 0x3c, 0x58, 0x8c, 0x3e, 0x3f,
- 0xd8, 0xe8, 0x47, 0x5b, 0xa4, 0xc8, 0xfb, 0xb0, 0x3d, 0xe8, 0x14, 0x15, 0xdf, 0xa1, 0x33, 0x3a,
- 0x45, 0xc5, 0x1e, 0xc0, 0xa0, 0x2c, 0xd2, 0xb7, 0x65, 0xb2, 0x46, 0xde, 0xb3, 0x01, 0x46, 0xed,
- 0xda, 0x1e, 0xac, 0x4d, 0xa2, 0x4c, 0x4c, 0x45, 0xdb, 0xa5, 0xa2, 0x0d, 0x09, 0xb9, 0xb0, 0x95,
- 0xbb, 0x0f, 0x03, 0x2c, 0x33, 0x67, 0xec, 0x93, 0xb1, 0x8f, 0x65, 0x46, 0x26, 0x0e, 0x7d, 0x91,
- 0x18, 0x2c, 0xd3, 0x6b, 0x3e, 0x70, 0x16, 0xbf, 0x24, 0xb2, 0xa5, 0xd7, 0xa9, 0x40, 0xcd, 0x87,
- 0xce, 0xe2, 0x97, 0xb6, 0xd7, 0x6b, 0x34, 0x2b, 0x99, 0x71, 0x70, 0xbd, 0x76, 0x2b, 0x1b, 0xa1,
- 0x42, 0x2d, 0x6b, 0x95, 0x22, 0x1f, 0x91, 0xa5, 0x5d, 0xb3, 0x27, 0x30, 0x5e, 0x19, 0x53, 0xc5,
- 0xbe, 0x58, 0x7c, 0x4c, 0xf6, 0x91, 0xc5, 0xbe, 0x77, 0xd0, 0x06, 0x85, 0x26, 0xd4, 0x60, 0xbf,
- 0x62, 0x4f, 0x61, 0xa2, 0x50, 0x57, 0xb2, 0xd4, 0x18, 0xeb, 0xe2, 0x27, 0xe4, 0x7b, 0x14, 0xce,
- 0xb8, 0x01, 0xcf, 0x8b, 0x9f, 0xd0, 0x9d, 0x7d, 0x89, 0x4a, 0xa1, 0xe2, 0x53, 0x57, 0x9d, 0x66,
- 0x6d, 0xab, 0x53, 0x6b, 0x54, 0x71, 0x92, 0x63, 0x69, 0xf8, 0x8c, 0xac, 0x43, 0x8b, 0xbc, 0xb0,
- 0x00, 0x0b, 0x61, 0x52, 0x2b, 0x11, 0xaf, 0x93, 0x2a, 0xc6, 0xd2, 0xa8, 0x6b, 0xbe, 0xef, 0x62,
- 0xab, 0x95, 0x78, 0x95, 0x54, 0x27, 0x16, 0xb2, 0xdb, 0xa7, 0x72, 0xfd, 0x8f, 0xa2, 0xc4, 0x8c,
- 0x33, 0x97, 0x5a, 0xb3, 0xb6, 0x0c, 0x4c, 0xaa, 0x22, 0x6e, 0x8a, 0x75, 0x67, 0x1e, 0x2c, 0xba,
- 0x11, 0x24, 0x55, 0xf1, 0xca, 0xd7, 0x8b, 0xc1, 0xce, 0x4a, 0x6a, 0xc3, 0x0f, 0xe8, 0x64, 0x7a,
- 0xb6, 0x58, 0x6a, 0xb1, 0xbb, 0xf3, 0x60, 0x11, 0x44, 0xf4, 0xcc, 0x9e, 0xc1, 0xd4, 0x24, 0xfa,
- 0x6d, 0xfc, 0x43, 0x8d, 0x35, 0xc6, 0xd4, 0xe8, 0x7b, 0xf4, 0xca, 0xc4, 0xc2, 0xdf, 0x59, 0xf4,
- 0xb5, 0xed, 0xf6, 0x43, 0x18, 0x92, 0x1f, 0x79, 0x7c, 0xe0, 0x92, 0xb5, 0x00, 0x19, 0x0f, 0xe1,
- 0xce, 0x8f, 0x89, 0x8e, 0x85, 0x4c, 0xb2, 0xa2, 0xcc, 0x63, 0xcf, 0x3e, 0xce, 0xe7, 0xc1, 0x62,
- 0x10, 0xed, 0xff, 0x98, 0xe8, 0xa5, 0xb3, 0x34, 0x83, 0xfb, 0x04, 0xc6, 0x15, 0x96, 0xe4, 0x4b,
- 0xfc, 0xb8, 0x4f, 0xe1, 0x8f, 0x3c, 0x46, 0x1c, 0xf9, 0xd8, 0x36, 0xa0, 0x12, 0x45, 0x9a, 0xc4,
- 0x45, 0x99, 0xe1, 0x15, 0x7f, 0x30, 0x0f, 0x16, 0xbd, 0xe3, 0xce, 0xa7, 0x9f, 0xd9, 0x26, 0x90,
- 0xe1, 0xd4, 0xe2, 0x6c, 0x0e, 0x83, 0xcb, 0xa2, 0x2c, 0xf4, 0x0a, 0x33, 0xfe, 0xd0, 0x1e, 0x78,
- 0xbc, 0x63, 0x54, 0x8d, 0x51, 0x8b, 0xda, 0xd0, 0x53, 0x21, 0x4b, 0x8c, 0xdf, 0xe2, 0x35, 0xff,
- 0x3d, 0x09, 0xc0, 0x80, 0x80, 0x6f, 0xf1, 0x9a, 0x3d, 0x83, 0x1d, 0x52, 0xab, 0x47, 0xa4, 0x56,
- 0x6c, 0x7b, 0x3a, 0x48, 0xa6, 0xc8, 0xce, 0xfe, 0x08, 0x33, 0xfb, 0xaf, 0xe3, 0xa2, 0x4c, 0xe5,
- 0xba, 0x12, 0x68, 0x90, 0x7f, 0x48, 0xf9, 0x4d, 0x09, 0x3f, 0x6d, 0x61, 0xf6, 0x09, 0x30, 0x3b,
- 0xed, 0x6e, 0x9b, 0x58, 0xa1, 0xc0, 0x44, 0x23, 0x7f, 0x46, 0x07, 0xcf, 0x92, 0xaa, 0x3a, 0x21,
- 0x43, 0xe4, 0x70, 0xdb, 0x49, 0xbc, 0x2a, 0x4c, 0xac, 0x30, 0xd1, 0xb2, 0xe4, 0x7f, 0xb0, 0x69,
- 0x46, 0x60, 0xa1, 0x88, 0x10, 0xf6, 0x05, 0xdc, 0xb3, 0xc5, 0x35, 0x2b, 0x25, 0x8d, 0x11, 0x98,
- 0xc5, 0x97, 0x52, 0xb9, 0xb2, 0x3d, 0xa6, 0xf3, 0x6d, 0xe9, 0x2f, 0x1a, 0xe3, 0xd7, 0x52, 0x51,
- 0xf9, 0xbe, 0x84, 0x07, 0x3f, 0x7f, 0xc9, 0xf7, 0x45, 0xf3, 0x39, 0xbd, 0xf8, 0xc1, 0xad, 0x17,
- 0x7d, 0x77, 0x34, 0xdd, 0x17, 0xed, 0x8b, 0x74, 0xd2, 0x13, 0x6a, 0xd0, 0xa4, 0x45, 0xe9, 0x8c,
- 0xc7, 0x30, 0xb2, 0x97, 0x1a, 0x2a, 0x47, 0x8a, 0x90, 0x12, 0x04, 0x07, 0x59, 0x5a, 0x84, 0x7f,
- 0x83, 0xd9, 0x52, 0xe6, 0xaf, 0x48, 0xc8, 0x9a, 0x81, 0xdb, 0xd2, 0xbc, 0xe0, 0x7d, 0x35, 0x2f,
- 0xd8, 0xd2, 0xbc, 0xf0, 0xbf, 0x3d, 0xd8, 0x5b, 0xca, 0x3c, 0xc2, 0x24, 0x6b, 0x28, 0xf5, 0x0b,
- 0x12, 0x7b, 0x7b, 0xa3, 0xee, 0xb6, 0x78, 0x7e, 0x05, 0x7b, 0x3e, 0x9a, 0x46, 0x23, 0xee, 0x10,
- 0x0f, 0x1e, 0x6e, 0xf3, 0x60, 0x2b, 0x85, 0x68, 0xb2, 0xde, 0xca, 0x68, 0x5b, 0x07, 0xbb, 0x54,
- 0xa9, 0x5f, 0xd0, 0xc1, 0x1d, 0x32, 0xb6, 0x3a, 0x78, 0xa3, 0xcd, 0xbd, 0xf7, 0xd0, 0xe6, 0x6d,
- 0xa1, 0xdf, 0x9d, 0x77, 0xb7, 0x85, 0xfe, 0x39, 0xec, 0xaf, 0x8b, 0xb2, 0x58, 0xd7, 0xeb, 0x98,
- 0xae, 0x60, 0xba, 0xb5, 0xfa, 0xc4, 0xa6, 0xa9, 0x37, 0x58, 0x46, 0xd3, 0xfd, 0xf5, 0x29, 0xb0,
- 0xa2, 0x4c, 0x45, 0x9d, 0xe1, 0x26, 0x9d, 0x07, 0x6e, 0x5c, 0xbd, 0x65, 0x83, 0xd0, 0x07, 0xd0,
- 0x4b, 0x65, 0x5d, 0x1a, 0x3e, 0xa4, 0xf8, 0xdd, 0xc2, 0xd2, 0xbc, 0x91, 0x23, 0x3a, 0x51, 0x61,
- 0x8e, 0x57, 0x7c, 0x8f, 0x7a, 0x35, 0x6b, 0x2c, 0xd4, 0xa5, 0x1c, 0xaf, 0x6c, 0xf4, 0x56, 0x83,
- 0xbc, 0x97, 0x53, 0xcb, 0xa1, 0x45, 0x9c, 0xf9, 0xe9, 0xed, 0x71, 0x9f, 0x51, 0xe4, 0xdb, 0xa3,
- 0xbe, 0x80, 0x59, 0x13, 0xb6, 0xed, 0x35, 0x7d, 0x23, 0x00, 0x05, 0xbd, 0xe7, 0x71, 0xf7, 0x75,
- 0xa1, 0xd9, 0x11, 0x1c, 0x34, 0x1e, 0x71, 0x85, 0x2d, 0xf3, 0xf9, 0x3e, 0xed, 0xba, 0x9f, 0x38,
- 0xb7, 0xbf, 0xa2, 0xda, 0x50, 0xa4, 0x66, 0x6b, 0x92, 0xcd, 0x11, 0x6d, 0x3b, 0xf2, 0xd8, 0x37,
- 0x56, 0x29, 0x1f, 0xc3, 0xa8, 0x3d, 0x5d, 0x08, 0x3e, 0x26, 0x0f, 0x68, 0x0e, 0x16, 0xc2, 0x8e,
- 0x4d, 0x9a, 0xa4, 0x2b, 0x8c, 0x0b, 0x83, 0x2a, 0x31, 0x52, 0xf1, 0x09, 0xf9, 0x4c, 0x08, 0x3d,
- 0xf5, 0xa0, 0xad, 0x44, 0x59, 0xaf, 0x63, 0xbd, 0x4a, 0x54, 0xa6, 0x39, 0xa3, 0x88, 0x86, 0x65,
- 0xbd, 0x3e, 0x27, 0x20, 0xfc, 0x57, 0x40, 0xdf, 0x83, 0x8e, 0xdb, 0xee, 0xb2, 0x61, 0x1f, 0x43,
- 0x57, 0xc8, 0x9c, 0x07, 0xc4, 0xcd, 0xbb, 0x1b, 0x2c, 0xb9, 0xf9, 0xc6, 0x88, 0xac, 0xc7, 0x06,
- 0xa3, 0x3a, 0xef, 0xc1, 0xa8, 0x10, 0x26, 0x22, 0xd1, 0x26, 0x6e, 0xf9, 0xe9, 0xc8, 0x3b, 0xb2,
- 0xe0, 0x89, 0xe3, 0x68, 0xf8, 0x9f, 0x80, 0x46, 0xed, 0x8d, 0xfd, 0xac, 0x89, 0x30, 0x95, 0xea,
- 0xf6, 0x4c, 0x05, 0xb7, 0x86, 0xf3, 0xd6, 0x3c, 0x74, 0x5c, 0x7e, 0xff, 0x7f, 0x1e, 0xba, 0x64,
- 0x6c, 0xe7, 0xa1, 0xe5, 0xd9, 0xce, 0x26, 0xcf, 0x1e, 0x01, 0x18, 0x69, 0x12, 0xe1, 0xee, 0xe1,
- 0x9e, 0x9b, 0x2f, 0x42, 0xe8, 0x12, 0xe6, 0xd0, 0x57, 0x14, 0x97, 0xe6, 0xbb, 0x6e, 0x3b, 0xbf,
- 0x0c, 0xff, 0xdd, 0xa1, 0x4a, 0xfa, 0xd0, 0x7f, 0x8b, 0x4c, 0xfc, 0x7c, 0xc4, 0x7b, 0xbf, 0x36,
- 0xe2, 0xbd, 0xcd, 0x11, 0x9f, 0xd9, 0xcf, 0x11, 0x51, 0x1b, 0xbb, 0xf7, 0x4a, 0xd6, 0x4a, 0x53,
- 0x0a, 0x93, 0xe3, 0xe0, 0xb3, 0x68, 0x7a, 0x63, 0xfa, 0xc6, 0x5a, 0xec, 0x25, 0xe3, 0x07, 0xa7,
- 0xd1, 0x23, 0x97, 0xd4, 0x20, 0x9a, 0x7a, 0xdc, 0x8b, 0x0e, 0x7d, 0xa0, 0xd4, 0x36, 0xb1, 0x56,
- 0xb8, 0xdc, 0xa8, 0x8f, 0x09, 0x6c, 0xa4, 0xe9, 0x29, 0x4c, 0x9a, 0x7d, 0x62, 0x59, 0x8a, 0x6b,
- 0x3f, 0xe2, 0xe3, 0x06, 0x3c, 0x2b, 0xc5, 0x75, 0x78, 0x45, 0x2a, 0xed, 0xab, 0xe4, 0x09, 0x77,
- 0x04, 0x3d, 0xda, 0xc8, 0x53, 0xee, 0xfe, 0x36, 0x8d, 0x36, 0xc8, 0x10, 0x39, 0x3f, 0xf6, 0x05,
- 0xf4, 0x75, 0xbd, 0x5e, 0x27, 0xea, 0xda, 0x33, 0xef, 0x57, 0x5e, 0x69, 0x3c, 0xbf, 0xea, 0xfd,
- 0xdd, 0x92, 0xf6, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x70, 0xd9, 0xa0, 0xf8, 0x48, 0x0d, 0x00,
- 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto
deleted file mode 100644
index 8981dc475..000000000
--- a/vendor/google.golang.org/appengine/internal/log/log_service.proto
+++ /dev/null
@@ -1,150 +0,0 @@
-syntax = "proto2";
-option go_package = "log";
-
-package appengine;
-
-message LogServiceError {
- enum ErrorCode {
- OK = 0;
- INVALID_REQUEST = 1;
- STORAGE_ERROR = 2;
- }
-}
-
-message UserAppLogLine {
- required int64 timestamp_usec = 1;
- required int64 level = 2;
- required string message = 3;
-}
-
-message UserAppLogGroup {
- repeated UserAppLogLine log_line = 2;
-}
-
-message FlushRequest {
- optional bytes logs = 1;
-}
-
-message SetStatusRequest {
- required string status = 1;
-}
-
-
-message LogOffset {
- optional bytes request_id = 1;
-}
-
-message LogLine {
- required int64 time = 1;
- required int32 level = 2;
- required string log_message = 3;
-}
-
-message RequestLog {
- required string app_id = 1;
- optional string module_id = 37 [default="default"];
- required string version_id = 2;
- required bytes request_id = 3;
- optional LogOffset offset = 35;
- required string ip = 4;
- optional string nickname = 5;
- required int64 start_time = 6;
- required int64 end_time = 7;
- required int64 latency = 8;
- required int64 mcycles = 9;
- required string method = 10;
- required string resource = 11;
- required string http_version = 12;
- required int32 status = 13;
- required int64 response_size = 14;
- optional string referrer = 15;
- optional string user_agent = 16;
- required string url_map_entry = 17;
- required string combined = 18;
- optional int64 api_mcycles = 19;
- optional string host = 20;
- optional double cost = 21;
-
- optional string task_queue_name = 22;
- optional string task_name = 23;
-
- optional bool was_loading_request = 24;
- optional int64 pending_time = 25;
- optional int32 replica_index = 26 [default = -1];
- optional bool finished = 27 [default = true];
- optional bytes clone_key = 28;
-
- repeated LogLine line = 29;
-
- optional bool lines_incomplete = 36;
- optional bytes app_engine_release = 38;
-
- optional int32 exit_reason = 30;
- optional bool was_throttled_for_time = 31;
- optional bool was_throttled_for_requests = 32;
- optional int64 throttled_time = 33;
-
- optional bytes server_name = 34;
-}
-
-message LogModuleVersion {
- optional string module_id = 1 [default="default"];
- optional string version_id = 2;
-}
-
-message LogReadRequest {
- required string app_id = 1;
- repeated string version_id = 2;
- repeated LogModuleVersion module_version = 19;
-
- optional int64 start_time = 3;
- optional int64 end_time = 4;
- optional LogOffset offset = 5;
- repeated bytes request_id = 6;
-
- optional int32 minimum_log_level = 7;
- optional bool include_incomplete = 8;
- optional int64 count = 9;
-
- optional string combined_log_regex = 14;
- optional string host_regex = 15;
- optional int32 replica_index = 16;
-
- optional bool include_app_logs = 10;
- optional int32 app_logs_per_request = 17;
- optional bool include_host = 11;
- optional bool include_all = 12;
- optional bool cache_iterator = 13;
- optional int32 num_shards = 18;
-}
-
-message LogReadResponse {
- repeated RequestLog log = 1;
- optional LogOffset offset = 2;
- optional int64 last_end_time = 3;
-}
-
-message LogUsageRecord {
- optional string version_id = 1;
- optional int32 start_time = 2;
- optional int32 end_time = 3;
- optional int64 count = 4;
- optional int64 total_size = 5;
- optional int32 records = 6;
-}
-
-message LogUsageRequest {
- required string app_id = 1;
- repeated string version_id = 2;
- optional int32 start_time = 3;
- optional int32 end_time = 4;
- optional uint32 resolution_hours = 5 [default = 1];
- optional bool combine_versions = 6;
- optional int32 usage_version = 7;
- optional bool versions_only = 8;
-}
-
-message LogUsageResponse {
- repeated LogUsageRecord usage = 1;
- optional LogUsageRecord summary = 2;
-}
diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go
deleted file mode 100644
index 1e765312f..000000000
--- a/vendor/google.golang.org/appengine/internal/main.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build appengine
-
-package internal
-
-import (
- "appengine_internal"
-)
-
-func Main() {
- MainPath = ""
- appengine_internal.Main()
-}
diff --git a/vendor/google.golang.org/appengine/internal/main_common.go b/vendor/google.golang.org/appengine/internal/main_common.go
deleted file mode 100644
index 357dce4dd..000000000
--- a/vendor/google.golang.org/appengine/internal/main_common.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package internal
-
-// MainPath stores the file path of the main package. On App Engine Standard
-// using Go version 1.9 and below, this will be unset. On App Engine Flex and
-// App Engine Standard second-gen (Go 1.11 and above), this will be the
-// filepath to package main.
-var MainPath string
diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go
deleted file mode 100644
index ddb79a333..000000000
--- a/vendor/google.golang.org/appengine/internal/main_vm.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package internal
-
-import (
- "io"
- "log"
- "net/http"
- "net/url"
- "os"
- "path/filepath"
- "runtime"
-)
-
-func Main() {
- MainPath = filepath.Dir(findMainPath())
- installHealthChecker(http.DefaultServeMux)
-
- port := "8080"
- if s := os.Getenv("PORT"); s != "" {
- port = s
- }
-
- host := ""
- if IsDevAppServer() {
- host = "127.0.0.1"
- }
- if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil {
- log.Fatalf("http.ListenAndServe: %v", err)
- }
-}
-
-// Find the path to package main by looking at the root Caller.
-func findMainPath() string {
- pc := make([]uintptr, 100)
- n := runtime.Callers(2, pc)
- frames := runtime.CallersFrames(pc[:n])
- for {
- frame, more := frames.Next()
- // Tests won't have package main, instead they have testing.tRunner
- if frame.Function == "main.main" || frame.Function == "testing.tRunner" {
- return frame.File
- }
- if !more {
- break
- }
- }
- return ""
-}
-
-func installHealthChecker(mux *http.ServeMux) {
- // If no health check handler has been installed by this point, add a trivial one.
- const healthPath = "/_ah/health"
- hreq := &http.Request{
- Method: "GET",
- URL: &url.URL{
- Path: healthPath,
- },
- }
- if _, pat := mux.Handler(hreq); pat != healthPath {
- mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, "ok")
- })
- }
-}
diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
deleted file mode 100644
index c4ba63bb4..000000000
--- a/vendor/google.golang.org/appengine/internal/metadata.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-// This file has code for accessing metadata.
-//
-// References:
-// https://cloud.google.com/compute/docs/metadata
-
-import (
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
-)
-
-const (
- metadataHost = "metadata"
- metadataPath = "/computeMetadata/v1/"
-)
-
-var (
- metadataRequestHeaders = http.Header{
- "Metadata-Flavor": []string{"Google"},
- }
-)
-
-// TODO(dsymonds): Do we need to support default values, like Python?
-func mustGetMetadata(key string) []byte {
- b, err := getMetadata(key)
- if err != nil {
- panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err))
- }
- return b
-}
-
-func getMetadata(key string) ([]byte, error) {
- // TODO(dsymonds): May need to use url.Parse to support keys with query args.
- req := &http.Request{
- Method: "GET",
- URL: &url.URL{
- Scheme: "http",
- Host: metadataHost,
- Path: metadataPath + key,
- },
- Header: metadataRequestHeaders,
- Host: metadataHost,
- }
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
- if resp.StatusCode != 200 {
- return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
- }
- return ioutil.ReadAll(resp.Body)
-}
diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go
deleted file mode 100644
index fe429720e..000000000
--- a/vendor/google.golang.org/appengine/internal/net.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-// This file implements a network dialer that limits the number of concurrent connections.
-// It is only used for API calls.
-
-import (
- "log"
- "net"
- "runtime"
- "sync"
- "time"
-)
-
-var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
-
-func limitRelease() {
- // non-blocking
- select {
- case <-limitSem:
- default:
- // This should not normally happen.
- log.Print("appengine: unbalanced limitSem release!")
- }
-}
-
-func limitDial(network, addr string) (net.Conn, error) {
- limitSem <- 1
-
- // Dial with a timeout in case the API host is MIA.
- // The connection should normally be very fast.
- conn, err := net.DialTimeout(network, addr, 10*time.Second)
- if err != nil {
- limitRelease()
- return nil, err
- }
- lc := &limitConn{Conn: conn}
- runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
- return lc, nil
-}
-
-type limitConn struct {
- close sync.Once
- net.Conn
-}
-
-func (lc *limitConn) Close() error {
- defer lc.close.Do(func() {
- limitRelease()
- runtime.SetFinalizer(lc, nil)
- })
- return lc.Conn.Close()
-}
diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh
deleted file mode 100644
index 2fdb546a6..000000000
--- a/vendor/google.golang.org/appengine/internal/regen.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash -e
-#
-# This script rebuilds the generated code for the protocol buffers.
-# To run this you will need protoc and goprotobuf installed;
-# see https://github.com/golang/protobuf for instructions.
-
-PKG=google.golang.org/appengine
-
-function die() {
- echo 1>&2 $*
- exit 1
-}
-
-# Sanity check that the right tools are accessible.
-for tool in go protoc protoc-gen-go; do
- q=$(which $tool) || die "didn't find $tool"
- echo 1>&2 "$tool: $q"
-done
-
-echo -n 1>&2 "finding package dir... "
-pkgdir=$(go list -f '{{.Dir}}' $PKG)
-echo 1>&2 $pkgdir
-base=$(echo $pkgdir | sed "s,/$PKG\$,,")
-echo 1>&2 "base: $base"
-cd $base
-
-# Run protoc once per package.
-for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
- echo 1>&2 "* $dir"
- protoc --go_out=. $dir/*.proto
-done
-
-for f in $(find $PKG/internal -name '*.pb.go'); do
- # Remove proto.RegisterEnum calls.
- # These cause duplicate registration panics when these packages
- # are used on classic App Engine. proto.RegisterEnum only affects
- # parsing the text format; we don't care about that.
- # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
- sed -i '/proto.RegisterEnum/d' $f
-done
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
deleted file mode 100644
index 8d782a38e..000000000
--- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
+++ /dev/null
@@ -1,361 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
-
-package remote_api
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type RpcError_ErrorCode int32
-
-const (
- RpcError_UNKNOWN RpcError_ErrorCode = 0
- RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1
- RpcError_PARSE_ERROR RpcError_ErrorCode = 2
- RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3
- RpcError_OVER_QUOTA RpcError_ErrorCode = 4
- RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5
- RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
- RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7
- RpcError_BAD_REQUEST RpcError_ErrorCode = 8
- RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9
- RpcError_CANCELLED RpcError_ErrorCode = 10
- RpcError_REPLAY_ERROR RpcError_ErrorCode = 11
- RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12
-)
-
-var RpcError_ErrorCode_name = map[int32]string{
- 0: "UNKNOWN",
- 1: "CALL_NOT_FOUND",
- 2: "PARSE_ERROR",
- 3: "SECURITY_VIOLATION",
- 4: "OVER_QUOTA",
- 5: "REQUEST_TOO_LARGE",
- 6: "CAPABILITY_DISABLED",
- 7: "FEATURE_DISABLED",
- 8: "BAD_REQUEST",
- 9: "RESPONSE_TOO_LARGE",
- 10: "CANCELLED",
- 11: "REPLAY_ERROR",
- 12: "DEADLINE_EXCEEDED",
-}
-var RpcError_ErrorCode_value = map[string]int32{
- "UNKNOWN": 0,
- "CALL_NOT_FOUND": 1,
- "PARSE_ERROR": 2,
- "SECURITY_VIOLATION": 3,
- "OVER_QUOTA": 4,
- "REQUEST_TOO_LARGE": 5,
- "CAPABILITY_DISABLED": 6,
- "FEATURE_DISABLED": 7,
- "BAD_REQUEST": 8,
- "RESPONSE_TOO_LARGE": 9,
- "CANCELLED": 10,
- "REPLAY_ERROR": 11,
- "DEADLINE_EXCEEDED": 12,
-}
-
-func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
- p := new(RpcError_ErrorCode)
- *p = x
- return p
-}
-func (x RpcError_ErrorCode) String() string {
- return proto.EnumName(RpcError_ErrorCode_name, int32(x))
-}
-func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
- if err != nil {
- return err
- }
- *x = RpcError_ErrorCode(value)
- return nil
-}
-func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0}
-}
-
-type Request struct {
- ServiceName *string `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"`
- Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
- Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
- RequestId *string `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Request) Reset() { *m = Request{} }
-func (m *Request) String() string { return proto.CompactTextString(m) }
-func (*Request) ProtoMessage() {}
-func (*Request) Descriptor() ([]byte, []int) {
- return fileDescriptor_remote_api_1978114ec33a273d, []int{0}
-}
-func (m *Request) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Request.Unmarshal(m, b)
-}
-func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Request.Marshal(b, m, deterministic)
-}
-func (dst *Request) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Request.Merge(dst, src)
-}
-func (m *Request) XXX_Size() int {
- return xxx_messageInfo_Request.Size(m)
-}
-func (m *Request) XXX_DiscardUnknown() {
- xxx_messageInfo_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Request proto.InternalMessageInfo
-
-func (m *Request) GetServiceName() string {
- if m != nil && m.ServiceName != nil {
- return *m.ServiceName
- }
- return ""
-}
-
-func (m *Request) GetMethod() string {
- if m != nil && m.Method != nil {
- return *m.Method
- }
- return ""
-}
-
-func (m *Request) GetRequest() []byte {
- if m != nil {
- return m.Request
- }
- return nil
-}
-
-func (m *Request) GetRequestId() string {
- if m != nil && m.RequestId != nil {
- return *m.RequestId
- }
- return ""
-}
-
-type ApplicationError struct {
- Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
- Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ApplicationError) Reset() { *m = ApplicationError{} }
-func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
-func (*ApplicationError) ProtoMessage() {}
-func (*ApplicationError) Descriptor() ([]byte, []int) {
- return fileDescriptor_remote_api_1978114ec33a273d, []int{1}
-}
-func (m *ApplicationError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ApplicationError.Unmarshal(m, b)
-}
-func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic)
-}
-func (dst *ApplicationError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ApplicationError.Merge(dst, src)
-}
-func (m *ApplicationError) XXX_Size() int {
- return xxx_messageInfo_ApplicationError.Size(m)
-}
-func (m *ApplicationError) XXX_DiscardUnknown() {
- xxx_messageInfo_ApplicationError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ApplicationError proto.InternalMessageInfo
-
-func (m *ApplicationError) GetCode() int32 {
- if m != nil && m.Code != nil {
- return *m.Code
- }
- return 0
-}
-
-func (m *ApplicationError) GetDetail() string {
- if m != nil && m.Detail != nil {
- return *m.Detail
- }
- return ""
-}
-
-type RpcError struct {
- Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
- Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RpcError) Reset() { *m = RpcError{} }
-func (m *RpcError) String() string { return proto.CompactTextString(m) }
-func (*RpcError) ProtoMessage() {}
-func (*RpcError) Descriptor() ([]byte, []int) {
- return fileDescriptor_remote_api_1978114ec33a273d, []int{2}
-}
-func (m *RpcError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_RpcError.Unmarshal(m, b)
-}
-func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_RpcError.Marshal(b, m, deterministic)
-}
-func (dst *RpcError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RpcError.Merge(dst, src)
-}
-func (m *RpcError) XXX_Size() int {
- return xxx_messageInfo_RpcError.Size(m)
-}
-func (m *RpcError) XXX_DiscardUnknown() {
- xxx_messageInfo_RpcError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RpcError proto.InternalMessageInfo
-
-func (m *RpcError) GetCode() int32 {
- if m != nil && m.Code != nil {
- return *m.Code
- }
- return 0
-}
-
-func (m *RpcError) GetDetail() string {
- if m != nil && m.Detail != nil {
- return *m.Detail
- }
- return ""
-}
-
-type Response struct {
- Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
- Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
- ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" json:"application_error,omitempty"`
- JavaException []byte `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"`
- RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Response) Reset() { *m = Response{} }
-func (m *Response) String() string { return proto.CompactTextString(m) }
-func (*Response) ProtoMessage() {}
-func (*Response) Descriptor() ([]byte, []int) {
- return fileDescriptor_remote_api_1978114ec33a273d, []int{3}
-}
-func (m *Response) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Response.Unmarshal(m, b)
-}
-func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Response.Marshal(b, m, deterministic)
-}
-func (dst *Response) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Response.Merge(dst, src)
-}
-func (m *Response) XXX_Size() int {
- return xxx_messageInfo_Response.Size(m)
-}
-func (m *Response) XXX_DiscardUnknown() {
- xxx_messageInfo_Response.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Response proto.InternalMessageInfo
-
-func (m *Response) GetResponse() []byte {
- if m != nil {
- return m.Response
- }
- return nil
-}
-
-func (m *Response) GetException() []byte {
- if m != nil {
- return m.Exception
- }
- return nil
-}
-
-func (m *Response) GetApplicationError() *ApplicationError {
- if m != nil {
- return m.ApplicationError
- }
- return nil
-}
-
-func (m *Response) GetJavaException() []byte {
- if m != nil {
- return m.JavaException
- }
- return nil
-}
-
-func (m *Response) GetRpcError() *RpcError {
- if m != nil {
- return m.RpcError
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*Request)(nil), "remote_api.Request")
- proto.RegisterType((*ApplicationError)(nil), "remote_api.ApplicationError")
- proto.RegisterType((*RpcError)(nil), "remote_api.RpcError")
- proto.RegisterType((*Response)(nil), "remote_api.Response")
-}
-
-func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d)
-}
-
-var fileDescriptor_remote_api_1978114ec33a273d = []byte{
- // 531 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40,
- 0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 0x42,
- 0xca, 0x53, 0x2a, 0x38, 0x00, 0x62, 0x63, 0x6f, 0x91, 0x85, 0x65, 0xa7, 0x6b, 0xbb, 0x50, 0x5e,
- 0x56, 0x2b, 0x67, 0x65, 0x8c, 0x12, 0xaf, 0xd9, 0x98, 0x8a, 0x17, 0x6e, 0xc0, 0xb5, 0x38, 0x0c,
- 0xb7, 0x40, 0x36, 0x6e, 0x63, 0xf5, 0x89, 0xb7, 0x7f, 0x7e, 0x7b, 0xe6, 0x1b, 0xcd, 0xcc, 0xc2,
- 0xbb, 0x5c, 0xa9, 0x7c, 0x23, 0x17, 0xb9, 0xda, 0x88, 0x32, 0x5f, 0x28, 0x9d, 0x5f, 0x88, 0xaa,
- 0x92, 0x65, 0x5e, 0x94, 0xf2, 0xa2, 0x28, 0x6b, 0xa9, 0x4b, 0xb1, 0xb9, 0xd0, 0x72, 0xab, 0x6a,
- 0xc9, 0x45, 0x55, 0xf4, 0xe4, 0xa2, 0xd2, 0xaa, 0x56, 0x18, 0xf6, 0xce, 0xec, 0x27, 0x8c, 0x98,
- 0xfc, 0xf6, 0x5d, 0xee, 0x6a, 0xfc, 0x12, 0xec, 0x9d, 0xd4, 0xb7, 0x45, 0x26, 0x79, 0x29, 0xb6,
- 0xd2, 0x31, 0xa7, 0xe6, 0xdc, 0x62, 0x93, 0xce, 0x0b, 0xc5, 0x56, 0xe2, 0x33, 0x38, 0xdc, 0xca,
- 0xfa, 0x8b, 0x5a, 0x3b, 0x07, 0xed, 0xc7, 0x2e, 0xc2, 0x0e, 0x8c, 0xf4, 0xbf, 0x2a, 0xce, 0x60,
- 0x6a, 0xce, 0x6d, 0x76, 0x17, 0xe2, 0x17, 0x00, 0x9d, 0xe4, 0xc5, 0xda, 0x19, 0x4e, 0x8d, 0xb9,
- 0xc5, 0xac, 0xce, 0xf1, 0xd7, 0xb3, 0xb7, 0x80, 0x48, 0x55, 0x6d, 0x8a, 0x4c, 0xd4, 0x85, 0x2a,
- 0xa9, 0xd6, 0x4a, 0x63, 0x0c, 0x83, 0x4c, 0xad, 0xa5, 0x63, 0x4c, 0xcd, 0xf9, 0x90, 0xb5, 0xba,
- 0x01, 0xaf, 0x65, 0x2d, 0x8a, 0x4d, 0xd7, 0x55, 0x17, 0xcd, 0x7e, 0x9b, 0x30, 0x66, 0x55, 0xf6,
- 0x7f, 0x89, 0x46, 0x2f, 0xf1, 0x97, 0x09, 0x56, 0x9b, 0xe5, 0x36, 0x7f, 0x4d, 0x60, 0x94, 0x86,
- 0x1f, 0xc2, 0xe8, 0x63, 0x88, 0x1e, 0x61, 0x0c, 0xc7, 0x2e, 0x09, 0x02, 0x1e, 0x46, 0x09, 0xbf,
- 0x8c, 0xd2, 0xd0, 0x43, 0x06, 0x7e, 0x0c, 0x93, 0x15, 0x61, 0x31, 0xe5, 0x94, 0xb1, 0x88, 0x21,
- 0x13, 0x9f, 0x01, 0x8e, 0xa9, 0x9b, 0x32, 0x3f, 0xb9, 0xe1, 0xd7, 0x7e, 0x14, 0x90, 0xc4, 0x8f,
- 0x42, 0x74, 0x80, 0x8f, 0x01, 0xa2, 0x6b, 0xca, 0xf8, 0x55, 0x1a, 0x25, 0x04, 0x0d, 0xf0, 0x53,
- 0x38, 0x61, 0xf4, 0x2a, 0xa5, 0x71, 0xc2, 0x93, 0x28, 0xe2, 0x01, 0x61, 0xef, 0x29, 0x1a, 0xe2,
- 0x67, 0xf0, 0xc4, 0x25, 0x2b, 0xb2, 0xf4, 0x83, 0xa6, 0x80, 0xe7, 0xc7, 0x64, 0x19, 0x50, 0x0f,
- 0x1d, 0xe2, 0x53, 0x40, 0x97, 0x94, 0x24, 0x29, 0xa3, 0x7b, 0x77, 0xd4, 0xe0, 0x97, 0xc4, 0xe3,
- 0x5d, 0x25, 0x34, 0x6e, 0xf0, 0x8c, 0xc6, 0xab, 0x28, 0x8c, 0x69, 0xaf, 0xae, 0x85, 0x8f, 0xc0,
- 0x72, 0x49, 0xe8, 0xd2, 0xa0, 0xc9, 0x03, 0x8c, 0xc0, 0x66, 0x74, 0x15, 0x90, 0x9b, 0xae, 0xef,
- 0x49, 0xd3, 0x8f, 0x47, 0x89, 0x17, 0xf8, 0x21, 0xe5, 0xf4, 0x93, 0x4b, 0xa9, 0x47, 0x3d, 0x64,
- 0xcf, 0xfe, 0x18, 0x30, 0x66, 0x72, 0x57, 0xa9, 0x72, 0x27, 0xf1, 0x73, 0x18, 0xeb, 0x4e, 0x3b,
- 0xc6, 0xd4, 0x98, 0xdb, 0xec, 0x3e, 0xc6, 0xe7, 0x60, 0xc9, 0x1f, 0x99, 0xac, 0x9a, 0x75, 0xb5,
- 0x23, 0xb5, 0xd9, 0xde, 0xc0, 0x3e, 0x9c, 0x88, 0xfd, 0x3a, 0xb9, 0x6c, 0x06, 0xec, 0x1c, 0x4c,
- 0x8d, 0xf9, 0xe4, 0xcd, 0xf9, 0xa2, 0x77, 0x87, 0x0f, 0x77, 0xce, 0x90, 0x78, 0x78, 0x05, 0xaf,
- 0xe0, 0xf8, 0xab, 0xb8, 0x15, 0x7c, 0x4f, 0x1b, 0xb4, 0xb4, 0xa3, 0xc6, 0xa5, 0xf7, 0xc4, 0xd7,
- 0x60, 0xe9, 0x2a, 0xeb, 0x48, 0xc3, 0x96, 0x74, 0xda, 0x27, 0xdd, 0x1d, 0x07, 0x1b, 0xeb, 0x4e,
- 0x2d, 0xed, 0xcf, 0xbd, 0x07, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0xd1, 0x0f, 0x22, 0x4f,
- 0x03, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
deleted file mode 100644
index f21763a4e..000000000
--- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
+++ /dev/null
@@ -1,44 +0,0 @@
-syntax = "proto2";
-option go_package = "remote_api";
-
-package remote_api;
-
-message Request {
- required string service_name = 2;
- required string method = 3;
- required bytes request = 4;
- optional string request_id = 5;
-}
-
-message ApplicationError {
- required int32 code = 1;
- required string detail = 2;
-}
-
-message RpcError {
- enum ErrorCode {
- UNKNOWN = 0;
- CALL_NOT_FOUND = 1;
- PARSE_ERROR = 2;
- SECURITY_VIOLATION = 3;
- OVER_QUOTA = 4;
- REQUEST_TOO_LARGE = 5;
- CAPABILITY_DISABLED = 6;
- FEATURE_DISABLED = 7;
- BAD_REQUEST = 8;
- RESPONSE_TOO_LARGE = 9;
- CANCELLED = 10;
- REPLAY_ERROR = 11;
- DEADLINE_EXCEEDED = 12;
- }
- required int32 code = 1;
- optional string detail = 2;
-}
-
-message Response {
- optional bytes response = 1;
- optional bytes exception = 2;
- optional ApplicationError application_error = 3;
- optional bytes java_exception = 4;
- optional RpcError rpc_error = 5;
-}
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
deleted file mode 100644
index 9006ae653..000000000
--- a/vendor/google.golang.org/appengine/internal/transaction.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2014 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-package internal
-
-// This file implements hooks for applying datastore transactions.
-
-import (
- "errors"
- "reflect"
-
- "github.com/golang/protobuf/proto"
- netcontext "golang.org/x/net/context"
-
- basepb "google.golang.org/appengine/internal/base"
- pb "google.golang.org/appengine/internal/datastore"
-)
-
-var transactionSetters = make(map[reflect.Type]reflect.Value)
-
-// RegisterTransactionSetter registers a function that sets transaction information
-// in a protocol buffer message. f should be a function with two arguments,
-// the first being a protocol buffer type, and the second being *datastore.Transaction.
-func RegisterTransactionSetter(f interface{}) {
- v := reflect.ValueOf(f)
- transactionSetters[v.Type().In(0)] = v
-}
-
-// applyTransaction applies the transaction t to message pb
-// by using the relevant setter passed to RegisterTransactionSetter.
-func applyTransaction(pb proto.Message, t *pb.Transaction) {
- v := reflect.ValueOf(pb)
- if f, ok := transactionSetters[v.Type()]; ok {
- f.Call([]reflect.Value{v, reflect.ValueOf(t)})
- }
-}
-
-var transactionKey = "used for *Transaction"
-
-func transactionFromContext(ctx netcontext.Context) *transaction {
- t, _ := ctx.Value(&transactionKey).(*transaction)
- return t
-}
-
-func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
- return netcontext.WithValue(ctx, &transactionKey, t)
-}
-
-type transaction struct {
- transaction pb.Transaction
- finished bool
-}
-
-var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
-
-func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) {
- if transactionFromContext(c) != nil {
- return nil, errors.New("nested transactions are not supported")
- }
-
- // Begin the transaction.
- t := &transaction{}
- req := &pb.BeginTransactionRequest{
- App: proto.String(FullyQualifiedAppID(c)),
- }
- if xg {
- req.AllowMultipleEg = proto.Bool(true)
- }
- if previousTransaction != nil {
- req.PreviousTransaction = previousTransaction
- }
- if readOnly {
- req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum()
- } else {
- req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum()
- }
- if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
- return nil, err
- }
-
- // Call f, rolling back the transaction if f returns a non-nil error, or panics.
- // The panic is not recovered.
- defer func() {
- if t.finished {
- return
- }
- t.finished = true
- // Ignore the error return value, since we are already returning a non-nil
- // error (or we're panicking).
- Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
- }()
- if err := f(withTransaction(c, t)); err != nil {
- return &t.transaction, err
- }
- t.finished = true
-
- // Commit the transaction.
- res := &pb.CommitResponse{}
- err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
- if ae, ok := err.(*APIError); ok {
- /* TODO: restore this conditional
- if appengine.IsDevAppServer() {
- */
- // The Python Dev AppServer raises an ApplicationError with error code 2 (which is
- // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
- if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
- return &t.transaction, ErrConcurrentTransaction
- }
- if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
- return &t.transaction, ErrConcurrentTransaction
- }
- }
- return &t.transaction, err
-}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
deleted file mode 100644
index 5f727750a..000000000
--- a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
+++ /dev/null
@@ -1,527 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
-
-package urlfetch
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type URLFetchServiceError_ErrorCode int32
-
-const (
- URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0
- URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1
- URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2
- URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3
- URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4
- URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5
- URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6
- URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7
- URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8
- URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
- URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10
- URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11
- URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12
-)
-
-var URLFetchServiceError_ErrorCode_name = map[int32]string{
- 0: "OK",
- 1: "INVALID_URL",
- 2: "FETCH_ERROR",
- 3: "UNSPECIFIED_ERROR",
- 4: "RESPONSE_TOO_LARGE",
- 5: "DEADLINE_EXCEEDED",
- 6: "SSL_CERTIFICATE_ERROR",
- 7: "DNS_ERROR",
- 8: "CLOSED",
- 9: "INTERNAL_TRANSIENT_ERROR",
- 10: "TOO_MANY_REDIRECTS",
- 11: "MALFORMED_REPLY",
- 12: "CONNECTION_ERROR",
-}
-var URLFetchServiceError_ErrorCode_value = map[string]int32{
- "OK": 0,
- "INVALID_URL": 1,
- "FETCH_ERROR": 2,
- "UNSPECIFIED_ERROR": 3,
- "RESPONSE_TOO_LARGE": 4,
- "DEADLINE_EXCEEDED": 5,
- "SSL_CERTIFICATE_ERROR": 6,
- "DNS_ERROR": 7,
- "CLOSED": 8,
- "INTERNAL_TRANSIENT_ERROR": 9,
- "TOO_MANY_REDIRECTS": 10,
- "MALFORMED_REPLY": 11,
- "CONNECTION_ERROR": 12,
-}
-
-func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
- p := new(URLFetchServiceError_ErrorCode)
- *p = x
- return p
-}
-func (x URLFetchServiceError_ErrorCode) String() string {
- return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
-}
-func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
- if err != nil {
- return err
- }
- *x = URLFetchServiceError_ErrorCode(value)
- return nil
-}
-func (URLFetchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0, 0}
-}
-
-type URLFetchRequest_RequestMethod int32
-
-const (
- URLFetchRequest_GET URLFetchRequest_RequestMethod = 1
- URLFetchRequest_POST URLFetchRequest_RequestMethod = 2
- URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3
- URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4
- URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
- URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6
-)
-
-var URLFetchRequest_RequestMethod_name = map[int32]string{
- 1: "GET",
- 2: "POST",
- 3: "HEAD",
- 4: "PUT",
- 5: "DELETE",
- 6: "PATCH",
-}
-var URLFetchRequest_RequestMethod_value = map[string]int32{
- "GET": 1,
- "POST": 2,
- "HEAD": 3,
- "PUT": 4,
- "DELETE": 5,
- "PATCH": 6,
-}
-
-func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
- p := new(URLFetchRequest_RequestMethod)
- *p = x
- return p
-}
-func (x URLFetchRequest_RequestMethod) String() string {
- return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
-}
-func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
- if err != nil {
- return err
- }
- *x = URLFetchRequest_RequestMethod(value)
- return nil
-}
-func (URLFetchRequest_RequestMethod) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
-}
-
-type URLFetchServiceError struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} }
-func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
-func (*URLFetchServiceError) ProtoMessage() {}
-func (*URLFetchServiceError) Descriptor() ([]byte, []int) {
- return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0}
-}
-func (m *URLFetchServiceError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_URLFetchServiceError.Unmarshal(m, b)
-}
-func (m *URLFetchServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_URLFetchServiceError.Marshal(b, m, deterministic)
-}
-func (dst *URLFetchServiceError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_URLFetchServiceError.Merge(dst, src)
-}
-func (m *URLFetchServiceError) XXX_Size() int {
- return xxx_messageInfo_URLFetchServiceError.Size(m)
-}
-func (m *URLFetchServiceError) XXX_DiscardUnknown() {
- xxx_messageInfo_URLFetchServiceError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_URLFetchServiceError proto.InternalMessageInfo
-
-type URLFetchRequest struct {
- Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
- Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"`
- Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
- Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"`
- FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
- Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
- MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} }
-func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
-func (*URLFetchRequest) ProtoMessage() {}
-func (*URLFetchRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1}
-}
-func (m *URLFetchRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_URLFetchRequest.Unmarshal(m, b)
-}
-func (m *URLFetchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_URLFetchRequest.Marshal(b, m, deterministic)
-}
-func (dst *URLFetchRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_URLFetchRequest.Merge(dst, src)
-}
-func (m *URLFetchRequest) XXX_Size() int {
- return xxx_messageInfo_URLFetchRequest.Size(m)
-}
-func (m *URLFetchRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_URLFetchRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_URLFetchRequest proto.InternalMessageInfo
-
-const Default_URLFetchRequest_FollowRedirects bool = true
-const Default_URLFetchRequest_MustValidateServerCertificate bool = true
-
-func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
- if m != nil && m.Method != nil {
- return *m.Method
- }
- return URLFetchRequest_GET
-}
-
-func (m *URLFetchRequest) GetUrl() string {
- if m != nil && m.Url != nil {
- return *m.Url
- }
- return ""
-}
-
-func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *URLFetchRequest) GetPayload() []byte {
- if m != nil {
- return m.Payload
- }
- return nil
-}
-
-func (m *URLFetchRequest) GetFollowRedirects() bool {
- if m != nil && m.FollowRedirects != nil {
- return *m.FollowRedirects
- }
- return Default_URLFetchRequest_FollowRedirects
-}
-
-func (m *URLFetchRequest) GetDeadline() float64 {
- if m != nil && m.Deadline != nil {
- return *m.Deadline
- }
- return 0
-}
-
-func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
- if m != nil && m.MustValidateServerCertificate != nil {
- return *m.MustValidateServerCertificate
- }
- return Default_URLFetchRequest_MustValidateServerCertificate
-}
-
-type URLFetchRequest_Header struct {
- Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
- Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} }
-func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
-func (*URLFetchRequest_Header) ProtoMessage() {}
-func (*URLFetchRequest_Header) Descriptor() ([]byte, []int) {
- return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0}
-}
-func (m *URLFetchRequest_Header) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_URLFetchRequest_Header.Unmarshal(m, b)
-}
-func (m *URLFetchRequest_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_URLFetchRequest_Header.Marshal(b, m, deterministic)
-}
-func (dst *URLFetchRequest_Header) XXX_Merge(src proto.Message) {
- xxx_messageInfo_URLFetchRequest_Header.Merge(dst, src)
-}
-func (m *URLFetchRequest_Header) XXX_Size() int {
- return xxx_messageInfo_URLFetchRequest_Header.Size(m)
-}
-func (m *URLFetchRequest_Header) XXX_DiscardUnknown() {
- xxx_messageInfo_URLFetchRequest_Header.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_URLFetchRequest_Header proto.InternalMessageInfo
-
-func (m *URLFetchRequest_Header) GetKey() string {
- if m != nil && m.Key != nil {
- return *m.Key
- }
- return ""
-}
-
-func (m *URLFetchRequest_Header) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return ""
-}
-
-type URLFetchResponse struct {
- Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"`
- StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"`
- Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"`
- ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"`
- ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"`
- ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"`
- FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"`
- ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
- ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
- ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} }
-func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
-func (*URLFetchResponse) ProtoMessage() {}
-func (*URLFetchResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2}
-}
-func (m *URLFetchResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_URLFetchResponse.Unmarshal(m, b)
-}
-func (m *URLFetchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_URLFetchResponse.Marshal(b, m, deterministic)
-}
-func (dst *URLFetchResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_URLFetchResponse.Merge(dst, src)
-}
-func (m *URLFetchResponse) XXX_Size() int {
- return xxx_messageInfo_URLFetchResponse.Size(m)
-}
-func (m *URLFetchResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_URLFetchResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_URLFetchResponse proto.InternalMessageInfo
-
-const Default_URLFetchResponse_ContentWasTruncated bool = false
-const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
-const Default_URLFetchResponse_ApiBytesSent int64 = 0
-const Default_URLFetchResponse_ApiBytesReceived int64 = 0
-
-func (m *URLFetchResponse) GetContent() []byte {
- if m != nil {
- return m.Content
- }
- return nil
-}
-
-func (m *URLFetchResponse) GetStatusCode() int32 {
- if m != nil && m.StatusCode != nil {
- return *m.StatusCode
- }
- return 0
-}
-
-func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *URLFetchResponse) GetContentWasTruncated() bool {
- if m != nil && m.ContentWasTruncated != nil {
- return *m.ContentWasTruncated
- }
- return Default_URLFetchResponse_ContentWasTruncated
-}
-
-func (m *URLFetchResponse) GetExternalBytesSent() int64 {
- if m != nil && m.ExternalBytesSent != nil {
- return *m.ExternalBytesSent
- }
- return 0
-}
-
-func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
- if m != nil && m.ExternalBytesReceived != nil {
- return *m.ExternalBytesReceived
- }
- return 0
-}
-
-func (m *URLFetchResponse) GetFinalUrl() string {
- if m != nil && m.FinalUrl != nil {
- return *m.FinalUrl
- }
- return ""
-}
-
-func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
- if m != nil && m.ApiCpuMilliseconds != nil {
- return *m.ApiCpuMilliseconds
- }
- return Default_URLFetchResponse_ApiCpuMilliseconds
-}
-
-func (m *URLFetchResponse) GetApiBytesSent() int64 {
- if m != nil && m.ApiBytesSent != nil {
- return *m.ApiBytesSent
- }
- return Default_URLFetchResponse_ApiBytesSent
-}
-
-func (m *URLFetchResponse) GetApiBytesReceived() int64 {
- if m != nil && m.ApiBytesReceived != nil {
- return *m.ApiBytesReceived
- }
- return Default_URLFetchResponse_ApiBytesReceived
-}
-
-type URLFetchResponse_Header struct {
- Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
- Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} }
-func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
-func (*URLFetchResponse_Header) ProtoMessage() {}
-func (*URLFetchResponse_Header) Descriptor() ([]byte, []int) {
- return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2, 0}
-}
-func (m *URLFetchResponse_Header) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_URLFetchResponse_Header.Unmarshal(m, b)
-}
-func (m *URLFetchResponse_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_URLFetchResponse_Header.Marshal(b, m, deterministic)
-}
-func (dst *URLFetchResponse_Header) XXX_Merge(src proto.Message) {
- xxx_messageInfo_URLFetchResponse_Header.Merge(dst, src)
-}
-func (m *URLFetchResponse_Header) XXX_Size() int {
- return xxx_messageInfo_URLFetchResponse_Header.Size(m)
-}
-func (m *URLFetchResponse_Header) XXX_DiscardUnknown() {
- xxx_messageInfo_URLFetchResponse_Header.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_URLFetchResponse_Header proto.InternalMessageInfo
-
-func (m *URLFetchResponse_Header) GetKey() string {
- if m != nil && m.Key != nil {
- return *m.Key
- }
- return ""
-}
-
-func (m *URLFetchResponse_Header) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
- }
- return ""
-}
-
-func init() {
- proto.RegisterType((*URLFetchServiceError)(nil), "appengine.URLFetchServiceError")
- proto.RegisterType((*URLFetchRequest)(nil), "appengine.URLFetchRequest")
- proto.RegisterType((*URLFetchRequest_Header)(nil), "appengine.URLFetchRequest.Header")
- proto.RegisterType((*URLFetchResponse)(nil), "appengine.URLFetchResponse")
- proto.RegisterType((*URLFetchResponse_Header)(nil), "appengine.URLFetchResponse.Header")
-}
-
-func init() {
- proto.RegisterFile("google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto", fileDescriptor_urlfetch_service_b245a7065f33bced)
-}
-
-var fileDescriptor_urlfetch_service_b245a7065f33bced = []byte{
- // 770 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xe3, 0x54,
- 0x10, 0xc6, 0x76, 0x7e, 0xa7, 0x5d, 0x7a, 0x76, 0xb6, 0x45, 0x66, 0xb5, 0xa0, 0x10, 0x09, 0x29,
- 0x17, 0x90, 0x2e, 0x2b, 0x24, 0x44, 0xaf, 0x70, 0xed, 0x93, 0xad, 0xa9, 0x63, 0x47, 0xc7, 0x4e,
- 0x61, 0xb9, 0xb1, 0xac, 0x78, 0x9a, 0x5a, 0xb2, 0xec, 0x60, 0x9f, 0x2c, 0xf4, 0x35, 0x78, 0x0d,
- 0xde, 0x87, 0xa7, 0xe1, 0x02, 0x9d, 0xc4, 0xc9, 0x6e, 0xbb, 0xd1, 0x4a, 0x5c, 0x65, 0xe6, 0x9b,
- 0xef, 0xcc, 0x99, 0x7c, 0xdf, 0xf8, 0x80, 0xb3, 0x2c, 0xcb, 0x65, 0x4e, 0xe3, 0x65, 0x99, 0x27,
- 0xc5, 0x72, 0x5c, 0x56, 0xcb, 0xf3, 0x64, 0xb5, 0xa2, 0x62, 0x99, 0x15, 0x74, 0x9e, 0x15, 0x92,
- 0xaa, 0x22, 0xc9, 0xcf, 0xd7, 0x55, 0x7e, 0x4b, 0x72, 0x71, 0xb7, 0x0f, 0xe2, 0x9a, 0xaa, 0xb7,
- 0xd9, 0x82, 0xc6, 0xab, 0xaa, 0x94, 0x25, 0xf6, 0xf7, 0x67, 0x86, 0x7f, 0xeb, 0x70, 0x3a, 0x17,
- 0xde, 0x44, 0xb1, 0xc2, 0x2d, 0x89, 0x57, 0x55, 0x59, 0x0d, 0xff, 0xd2, 0xa1, 0xbf, 0x89, 0xec,
- 0x32, 0x25, 0xec, 0x80, 0x1e, 0x5c, 0xb3, 0x4f, 0xf0, 0x04, 0x8e, 0x5c, 0xff, 0xc6, 0xf2, 0x5c,
- 0x27, 0x9e, 0x0b, 0x8f, 0x69, 0x0a, 0x98, 0xf0, 0xc8, 0xbe, 0x8a, 0xb9, 0x10, 0x81, 0x60, 0x3a,
- 0x9e, 0xc1, 0xd3, 0xb9, 0x1f, 0xce, 0xb8, 0xed, 0x4e, 0x5c, 0xee, 0x34, 0xb0, 0x81, 0x9f, 0x01,
- 0x0a, 0x1e, 0xce, 0x02, 0x3f, 0xe4, 0x71, 0x14, 0x04, 0xb1, 0x67, 0x89, 0xd7, 0x9c, 0xb5, 0x14,
- 0xdd, 0xe1, 0x96, 0xe3, 0xb9, 0x3e, 0x8f, 0xf9, 0xaf, 0x36, 0xe7, 0x0e, 0x77, 0x58, 0x1b, 0x3f,
- 0x87, 0xb3, 0x30, 0xf4, 0x62, 0x9b, 0x8b, 0xc8, 0x9d, 0xb8, 0xb6, 0x15, 0xf1, 0xa6, 0x53, 0x07,
- 0x9f, 0x40, 0xdf, 0xf1, 0xc3, 0x26, 0xed, 0x22, 0x40, 0xc7, 0xf6, 0x82, 0x90, 0x3b, 0xac, 0x87,
- 0x2f, 0xc0, 0x74, 0xfd, 0x88, 0x0b, 0xdf, 0xf2, 0xe2, 0x48, 0x58, 0x7e, 0xe8, 0x72, 0x3f, 0x6a,
- 0x98, 0x7d, 0x35, 0x82, 0xba, 0x79, 0x6a, 0xf9, 0x6f, 0x62, 0xc1, 0x1d, 0x57, 0x70, 0x3b, 0x0a,
- 0x19, 0xe0, 0x33, 0x38, 0x99, 0x5a, 0xde, 0x24, 0x10, 0x53, 0xee, 0xc4, 0x82, 0xcf, 0xbc, 0x37,
- 0xec, 0x08, 0x4f, 0x81, 0xd9, 0x81, 0xef, 0x73, 0x3b, 0x72, 0x03, 0xbf, 0x69, 0x71, 0x3c, 0xfc,
- 0xc7, 0x80, 0x93, 0x9d, 0x5a, 0x82, 0x7e, 0x5f, 0x53, 0x2d, 0xf1, 0x27, 0xe8, 0x4c, 0x49, 0xde,
- 0x95, 0xa9, 0xa9, 0x0d, 0xf4, 0xd1, 0xa7, 0xaf, 0x46, 0xe3, 0xbd, 0xba, 0xe3, 0x47, 0xdc, 0x71,
- 0xf3, 0xbb, 0xe5, 0x8b, 0xe6, 0x1c, 0x32, 0x30, 0xe6, 0x55, 0x6e, 0xea, 0x03, 0x7d, 0xd4, 0x17,
- 0x2a, 0xc4, 0x1f, 0xa1, 0x73, 0x47, 0x49, 0x4a, 0x95, 0x69, 0x0c, 0x8c, 0x11, 0xbc, 0xfa, 0xea,
- 0x23, 0x3d, 0xaf, 0x36, 0x44, 0xd1, 0x1c, 0xc0, 0x17, 0xd0, 0x9d, 0x25, 0xf7, 0x79, 0x99, 0xa4,
- 0x66, 0x67, 0xa0, 0x8d, 0x8e, 0x2f, 0xf5, 0x9e, 0x26, 0x76, 0x10, 0x8e, 0xe1, 0x64, 0x52, 0xe6,
- 0x79, 0xf9, 0x87, 0xa0, 0x34, 0xab, 0x68, 0x21, 0x6b, 0xb3, 0x3b, 0xd0, 0x46, 0xbd, 0x8b, 0x96,
- 0xac, 0xd6, 0x24, 0x1e, 0x17, 0xf1, 0x39, 0xf4, 0x1c, 0x4a, 0xd2, 0x3c, 0x2b, 0xc8, 0xec, 0x0d,
- 0xb4, 0x91, 0x26, 0xf6, 0x39, 0xfe, 0x0c, 0x5f, 0x4c, 0xd7, 0xb5, 0xbc, 0x49, 0xf2, 0x2c, 0x4d,
- 0x24, 0xa9, 0xed, 0xa1, 0xca, 0xa6, 0x4a, 0x66, 0xb7, 0xd9, 0x22, 0x91, 0x64, 0xf6, 0xdf, 0xeb,
- 0xfc, 0x71, 0xea, 0xf3, 0x97, 0xd0, 0xd9, 0xfe, 0x0f, 0x25, 0xc6, 0x35, 0xdd, 0x9b, 0xad, 0xad,
- 0x18, 0xd7, 0x74, 0x8f, 0xa7, 0xd0, 0xbe, 0x49, 0xf2, 0x35, 0x99, 0xed, 0x0d, 0xb6, 0x4d, 0x86,
- 0x1e, 0x3c, 0x79, 0xa0, 0x26, 0x76, 0xc1, 0x78, 0xcd, 0x23, 0xa6, 0x61, 0x0f, 0x5a, 0xb3, 0x20,
- 0x8c, 0x98, 0xae, 0xa2, 0x2b, 0x6e, 0x39, 0xcc, 0x50, 0xc5, 0xd9, 0x3c, 0x62, 0x2d, 0xb5, 0x2e,
- 0x0e, 0xf7, 0x78, 0xc4, 0x59, 0x1b, 0xfb, 0xd0, 0x9e, 0x59, 0x91, 0x7d, 0xc5, 0x3a, 0xc3, 0x7f,
- 0x0d, 0x60, 0xef, 0x84, 0xad, 0x57, 0x65, 0x51, 0x13, 0x9a, 0xd0, 0xb5, 0xcb, 0x42, 0x52, 0x21,
- 0x4d, 0x4d, 0x49, 0x29, 0x76, 0x29, 0x7e, 0x09, 0x10, 0xca, 0x44, 0xae, 0x6b, 0xf5, 0x71, 0x6c,
- 0x8c, 0x6b, 0x8b, 0xf7, 0x10, 0xbc, 0x78, 0xe4, 0xdf, 0xf0, 0xa0, 0x7f, 0xdb, 0x6b, 0x1e, 0x1b,
- 0xf8, 0x03, 0x3c, 0x6b, 0xae, 0xf9, 0x25, 0xa9, 0xa3, 0x6a, 0x5d, 0x28, 0x81, 0xb6, 0x66, 0xf6,
- 0x2e, 0xda, 0xb7, 0x49, 0x5e, 0x93, 0x38, 0xc4, 0xc0, 0x6f, 0xe0, 0x29, 0xff, 0x73, 0xfb, 0x02,
- 0x5c, 0xde, 0x4b, 0xaa, 0x43, 0x35, 0xb8, 0x72, 0xd7, 0x10, 0x1f, 0x16, 0xf0, 0x7b, 0x38, 0x7b,
- 0x00, 0x0a, 0x5a, 0x50, 0xf6, 0x96, 0xd2, 0x8d, 0xcd, 0x86, 0x38, 0x5c, 0x54, 0xfb, 0x30, 0xc9,
- 0x8a, 0x24, 0x57, 0xfb, 0xaa, 0xec, 0xed, 0x8b, 0x7d, 0x8e, 0xdf, 0x01, 0x5a, 0xab, 0xcc, 0x5e,
- 0xad, 0xa7, 0x59, 0x9e, 0x67, 0x35, 0x2d, 0xca, 0x22, 0xad, 0x4d, 0x50, 0xed, 0x2e, 0xb4, 0x97,
- 0xe2, 0x40, 0x11, 0xbf, 0x86, 0x63, 0x6b, 0x95, 0xbd, 0x9b, 0xf6, 0x68, 0x47, 0x7e, 0x00, 0xe3,
- 0xb7, 0xc0, 0x76, 0xf9, 0x7e, 0xcc, 0xe3, 0x1d, 0xf5, 0x83, 0xd2, 0xff, 0x5f, 0xa6, 0x4b, 0xf8,
- 0xad, 0xb7, 0x7b, 0x2a, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x9f, 0x6d, 0x24, 0x63, 0x05,
- 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
deleted file mode 100644
index f695edf6a..000000000
--- a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+++ /dev/null
@@ -1,64 +0,0 @@
-syntax = "proto2";
-option go_package = "urlfetch";
-
-package appengine;
-
-message URLFetchServiceError {
- enum ErrorCode {
- OK = 0;
- INVALID_URL = 1;
- FETCH_ERROR = 2;
- UNSPECIFIED_ERROR = 3;
- RESPONSE_TOO_LARGE = 4;
- DEADLINE_EXCEEDED = 5;
- SSL_CERTIFICATE_ERROR = 6;
- DNS_ERROR = 7;
- CLOSED = 8;
- INTERNAL_TRANSIENT_ERROR = 9;
- TOO_MANY_REDIRECTS = 10;
- MALFORMED_REPLY = 11;
- CONNECTION_ERROR = 12;
- }
-}
-
-message URLFetchRequest {
- enum RequestMethod {
- GET = 1;
- POST = 2;
- HEAD = 3;
- PUT = 4;
- DELETE = 5;
- PATCH = 6;
- }
- required RequestMethod Method = 1;
- required string Url = 2;
- repeated group Header = 3 {
- required string Key = 4;
- required string Value = 5;
- }
- optional bytes Payload = 6 [ctype=CORD];
-
- optional bool FollowRedirects = 7 [default=true];
-
- optional double Deadline = 8;
-
- optional bool MustValidateServerCertificate = 9 [default=true];
-}
-
-message URLFetchResponse {
- optional bytes Content = 1;
- required int32 StatusCode = 2;
- repeated group Header = 3 {
- required string Key = 4;
- required string Value = 5;
- }
- optional bool ContentWasTruncated = 6 [default=false];
- optional int64 ExternalBytesSent = 7;
- optional int64 ExternalBytesReceived = 8;
-
- optional string FinalUrl = 9;
-
- optional int64 ApiCpuMilliseconds = 10 [default=0];
- optional int64 ApiBytesSent = 11 [default=0];
- optional int64 ApiBytesReceived = 12 [default=0];
-}
diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
deleted file mode 100644
index 6ffe1e6d9..000000000
--- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2011 Google Inc. All rights reserved.
-// Use of this source code is governed by the Apache 2.0
-// license that can be found in the LICENSE file.
-
-// Package urlfetch provides an http.RoundTripper implementation
-// for fetching URLs via App Engine's urlfetch service.
-package urlfetch // import "google.golang.org/appengine/urlfetch"
-
-import (
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "github.com/golang/protobuf/proto"
- "golang.org/x/net/context"
-
- "google.golang.org/appengine/internal"
- pb "google.golang.org/appengine/internal/urlfetch"
-)
-
-// Transport is an implementation of http.RoundTripper for
-// App Engine. Users should generally create an http.Client using
-// this transport and use the Client rather than using this transport
-// directly.
-type Transport struct {
- Context context.Context
-
- // Controls whether the application checks the validity of SSL certificates
- // over HTTPS connections. A value of false (the default) instructs the
- // application to send a request to the server only if the certificate is
- // valid and signed by a trusted certificate authority (CA), and also
- // includes a hostname that matches the certificate. A value of true
- // instructs the application to perform no certificate validation.
- AllowInvalidServerCertificate bool
-}
-
-// Verify statically that *Transport implements http.RoundTripper.
-var _ http.RoundTripper = (*Transport)(nil)
-
-// Client returns an *http.Client using a default urlfetch Transport. This
-// client will have the default deadline of 5 seconds, and will check the
-// validity of SSL certificates.
-//
-// Any deadline of the provided context will be used for requests through this client;
-// if the client does not have a deadline then a 5 second default is used.
-func Client(ctx context.Context) *http.Client {
- return &http.Client{
- Transport: &Transport{
- Context: ctx,
- },
- }
-}
-
-type bodyReader struct {
- content []byte
- truncated bool
- closed bool
-}
-
-// ErrTruncatedBody is the error returned after the final Read() from a
-// response's Body if the body has been truncated by App Engine's proxy.
-var ErrTruncatedBody = errors.New("urlfetch: truncated body")
-
-func statusCodeToText(code int) string {
- if t := http.StatusText(code); t != "" {
- return t
- }
- return strconv.Itoa(code)
-}
-
-func (br *bodyReader) Read(p []byte) (n int, err error) {
- if br.closed {
- if br.truncated {
- return 0, ErrTruncatedBody
- }
- return 0, io.EOF
- }
- n = copy(p, br.content)
- if n > 0 {
- br.content = br.content[n:]
- return
- }
- if br.truncated {
- br.closed = true
- return 0, ErrTruncatedBody
- }
- return 0, io.EOF
-}
-
-func (br *bodyReader) Close() error {
- br.closed = true
- br.content = nil
- return nil
-}
-
-// A map of the URL Fetch-accepted methods that take a request body.
-var methodAcceptsRequestBody = map[string]bool{
- "POST": true,
- "PUT": true,
- "PATCH": true,
-}
-
-// urlString returns a valid string given a URL. This function is necessary because
-// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
-// See http://code.google.com/p/go/issues/detail?id=4860.
-func urlString(u *url.URL) string {
- if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
- return u.String()
- }
- aux := *u
- aux.Opaque = "//" + aux.Host + aux.Opaque
- return aux.String()
-}
-
-// RoundTrip issues a single HTTP request and returns its response. Per the
-// http.RoundTripper interface, RoundTrip only returns an error if there
-// was an unsupported request or the URL Fetch proxy fails.
-// Note that HTTP response codes such as 5xx, 403, 404, etc are not
-// errors as far as the transport is concerned and will be returned
-// with err set to nil.
-func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
- methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
- if !ok {
- return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
- }
-
- method := pb.URLFetchRequest_RequestMethod(methNum)
-
- freq := &pb.URLFetchRequest{
- Method: &method,
- Url: proto.String(urlString(req.URL)),
- FollowRedirects: proto.Bool(false), // http.Client's responsibility
- MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
- }
- if deadline, ok := t.Context.Deadline(); ok {
- freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
- }
-
- for k, vals := range req.Header {
- for _, val := range vals {
- freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
- Key: proto.String(k),
- Value: proto.String(val),
- })
- }
- }
- if methodAcceptsRequestBody[req.Method] && req.Body != nil {
- // Avoid a []byte copy if req.Body has a Bytes method.
- switch b := req.Body.(type) {
- case interface {
- Bytes() []byte
- }:
- freq.Payload = b.Bytes()
- default:
- freq.Payload, err = ioutil.ReadAll(req.Body)
- if err != nil {
- return nil, err
- }
- }
- }
-
- fres := &pb.URLFetchResponse{}
- if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
- return nil, err
- }
-
- res = &http.Response{}
- res.StatusCode = int(*fres.StatusCode)
- res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
- res.Header = make(http.Header)
- res.Request = req
-
- // Faked:
- res.ProtoMajor = 1
- res.ProtoMinor = 1
- res.Proto = "HTTP/1.1"
- res.Close = true
-
- for _, h := range fres.Header {
- hkey := http.CanonicalHeaderKey(*h.Key)
- hval := *h.Value
- if hkey == "Content-Length" {
- // Will get filled in below for all but HEAD requests.
- if req.Method == "HEAD" {
- res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
- }
- continue
- }
- res.Header.Add(hkey, hval)
- }
-
- if req.Method != "HEAD" {
- res.ContentLength = int64(len(fres.Content))
- }
-
- truncated := fres.GetContentWasTruncated()
- res.Body = &bodyReader{content: fres.Content, truncated: truncated}
- return
-}
-
-func init() {
- internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
- internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
-}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go
new file mode 100644
index 000000000..9f81dbcd8
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go
@@ -0,0 +1,1664 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v4.24.4
+// source: google/api/expr/v1alpha1/checked.proto
+
+package expr
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// CEL primitive types.
+type Type_PrimitiveType int32
+
+const (
+ // Unspecified type.
+ Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0
+ // Boolean type.
+ Type_BOOL Type_PrimitiveType = 1
+ // Int64 type.
+ //
+ // Proto-based integer values are widened to int64.
+ Type_INT64 Type_PrimitiveType = 2
+ // Uint64 type.
+ //
+ // Proto-based unsigned integer values are widened to uint64.
+ Type_UINT64 Type_PrimitiveType = 3
+ // Double type.
+ //
+ // Proto-based float values are widened to double values.
+ Type_DOUBLE Type_PrimitiveType = 4
+ // String type.
+ Type_STRING Type_PrimitiveType = 5
+ // Bytes type.
+ Type_BYTES Type_PrimitiveType = 6
+)
+
+// Enum value maps for Type_PrimitiveType.
+var (
+ Type_PrimitiveType_name = map[int32]string{
+ 0: "PRIMITIVE_TYPE_UNSPECIFIED",
+ 1: "BOOL",
+ 2: "INT64",
+ 3: "UINT64",
+ 4: "DOUBLE",
+ 5: "STRING",
+ 6: "BYTES",
+ }
+ Type_PrimitiveType_value = map[string]int32{
+ "PRIMITIVE_TYPE_UNSPECIFIED": 0,
+ "BOOL": 1,
+ "INT64": 2,
+ "UINT64": 3,
+ "DOUBLE": 4,
+ "STRING": 5,
+ "BYTES": 6,
+ }
+)
+
+func (x Type_PrimitiveType) Enum() *Type_PrimitiveType {
+ p := new(Type_PrimitiveType)
+ *p = x
+ return p
+}
+
+func (x Type_PrimitiveType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_api_expr_v1alpha1_checked_proto_enumTypes[0].Descriptor()
+}
+
+func (Type_PrimitiveType) Type() protoreflect.EnumType {
+ return &file_google_api_expr_v1alpha1_checked_proto_enumTypes[0]
+}
+
+func (x Type_PrimitiveType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_PrimitiveType.Descriptor instead.
+func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+// Well-known protobuf types treated with first-class support in CEL.
+type Type_WellKnownType int32
+
+const (
+ // Unspecified type.
+ Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0
+ // Well-known protobuf.Any type.
+ //
+ // Any types are a polymorphic message type. During type-checking they are
+ // treated like `DYN` types, but at runtime they are resolved to a specific
+ // message type specified at evaluation time.
+ Type_ANY Type_WellKnownType = 1
+ // Well-known protobuf.Timestamp type, internally referenced as `timestamp`.
+ Type_TIMESTAMP Type_WellKnownType = 2
+ // Well-known protobuf.Duration type, internally referenced as `duration`.
+ Type_DURATION Type_WellKnownType = 3
+)
+
+// Enum value maps for Type_WellKnownType.
+var (
+ Type_WellKnownType_name = map[int32]string{
+ 0: "WELL_KNOWN_TYPE_UNSPECIFIED",
+ 1: "ANY",
+ 2: "TIMESTAMP",
+ 3: "DURATION",
+ }
+ Type_WellKnownType_value = map[string]int32{
+ "WELL_KNOWN_TYPE_UNSPECIFIED": 0,
+ "ANY": 1,
+ "TIMESTAMP": 2,
+ "DURATION": 3,
+ }
+)
+
+func (x Type_WellKnownType) Enum() *Type_WellKnownType {
+ p := new(Type_WellKnownType)
+ *p = x
+ return p
+}
+
+func (x Type_WellKnownType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_api_expr_v1alpha1_checked_proto_enumTypes[1].Descriptor()
+}
+
+func (Type_WellKnownType) Type() protoreflect.EnumType {
+ return &file_google_api_expr_v1alpha1_checked_proto_enumTypes[1]
+}
+
+func (x Type_WellKnownType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_WellKnownType.Descriptor instead.
+func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+// A CEL expression which has been successfully type checked.
+type CheckedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A map from expression ids to resolved references.
+ //
+ // The following entries are in this table:
+ //
+ // - An Ident or Select expression is represented here if it resolves to a
+ // declaration. For instance, if `a.b.c` is represented by
+ // `select(select(id(a), b), c)`, and `a.b` resolves to a declaration,
+ // while `c` is a field selection, then the reference is attached to the
+ // nested select expression (but not to the id or or the outer select).
+ // In turn, if `a` resolves to a declaration and `b.c` are field selections,
+ // the reference is attached to the ident expression.
+ // - Every Call expression has an entry here, identifying the function being
+ // called.
+ // - Every CreateStruct expression for a message has an entry, identifying
+ // the message.
+ ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // A map from expression ids to types.
+ //
+ // Every expression node which has a type different than DYN has a mapping
+ // here. If an expression has type DYN, it is omitted from this map to save
+ // space.
+ TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // The source info derived from input that generated the parsed `expr` and
+ // any optimizations made during the type-checking pass.
+ SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+ // The expr version indicates the major / minor version number of the `expr`
+ // representation.
+ //
+ // The most common reason for a version change will be to indicate to the CEL
+ // runtimes that transformations have been performed on the expr during static
+ // analysis. In some cases, this will save the runtime the work of applying
+ // the same or similar transformations prior to evaluation.
+ ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"`
+ // The checked expression. Semantically equivalent to the parsed `expr`, but
+ // may have structural differences.
+ Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"`
+}
+
+func (x *CheckedExpr) Reset() {
+ *x = CheckedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CheckedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CheckedExpr) ProtoMessage() {}
+
+func (x *CheckedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead.
+func (*CheckedExpr) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference {
+ if x != nil {
+ return x.ReferenceMap
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetTypeMap() map[int64]*Type {
+ if x != nil {
+ return x.TypeMap
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetExprVersion() string {
+ if x != nil {
+ return x.ExprVersion
+ }
+ return ""
+}
+
+func (x *CheckedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
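
A minimal sketch of how a consumer might walk a `CheckedExpr` produced by a CEL type-checker; the `describeChecked` helper and the empty message passed to it are illustrative only, not part of the vendored file.

package main

import (
	"fmt"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// describeChecked prints the resolved references and inferred types recorded
// on a type-checked expression.
func describeChecked(ce *exprpb.CheckedExpr) {
	// Idents, selects, calls and struct creations that resolved to a
	// declaration have an entry in the reference map.
	for id, ref := range ce.GetReferenceMap() {
		fmt.Printf("expr %d resolves to %q (overloads: %v)\n", id, ref.GetName(), ref.GetOverloadId())
	}
	// Every node with a type other than DYN has an entry in the type map.
	for id, t := range ce.GetTypeMap() {
		fmt.Printf("expr %d has type kind %T\n", id, t.GetTypeKind())
	}
}

func main() {
	// An empty CheckedExpr just to keep the sketch runnable.
	describeChecked(&exprpb.CheckedExpr{})
}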
+
+// Represents a CEL type.
+type Type struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The kind of type.
+ //
+ // Types that are assignable to TypeKind:
+ //
+ // *Type_Dyn
+ // *Type_Null
+ // *Type_Primitive
+ // *Type_Wrapper
+ // *Type_WellKnown
+ // *Type_ListType_
+ // *Type_MapType_
+ // *Type_Function
+ // *Type_MessageType
+ // *Type_TypeParam
+ // *Type_Type
+ // *Type_Error
+ // *Type_AbstractType_
+ TypeKind isType_TypeKind `protobuf_oneof:"type_kind"`
+}
+
+func (x *Type) Reset() {
+ *x = Type{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type) ProtoMessage() {}
+
+func (x *Type) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type.ProtoReflect.Descriptor instead.
+func (*Type) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *Type) GetTypeKind() isType_TypeKind {
+ if m != nil {
+ return m.TypeKind
+ }
+ return nil
+}
+
+func (x *Type) GetDyn() *emptypb.Empty {
+ if x, ok := x.GetTypeKind().(*Type_Dyn); ok {
+ return x.Dyn
+ }
+ return nil
+}
+
+func (x *Type) GetNull() structpb.NullValue {
+ if x, ok := x.GetTypeKind().(*Type_Null); ok {
+ return x.Null
+ }
+ return structpb.NullValue_NULL_VALUE
+}
+
+func (x *Type) GetPrimitive() Type_PrimitiveType {
+ if x, ok := x.GetTypeKind().(*Type_Primitive); ok {
+ return x.Primitive
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWrapper() Type_PrimitiveType {
+ if x, ok := x.GetTypeKind().(*Type_Wrapper); ok {
+ return x.Wrapper
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWellKnown() Type_WellKnownType {
+ if x, ok := x.GetTypeKind().(*Type_WellKnown); ok {
+ return x.WellKnown
+ }
+ return Type_WELL_KNOWN_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetListType() *Type_ListType {
+ if x, ok := x.GetTypeKind().(*Type_ListType_); ok {
+ return x.ListType
+ }
+ return nil
+}
+
+func (x *Type) GetMapType() *Type_MapType {
+ if x, ok := x.GetTypeKind().(*Type_MapType_); ok {
+ return x.MapType
+ }
+ return nil
+}
+
+func (x *Type) GetFunction() *Type_FunctionType {
+ if x, ok := x.GetTypeKind().(*Type_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+func (x *Type) GetMessageType() string {
+ if x, ok := x.GetTypeKind().(*Type_MessageType); ok {
+ return x.MessageType
+ }
+ return ""
+}
+
+func (x *Type) GetTypeParam() string {
+ if x, ok := x.GetTypeKind().(*Type_TypeParam); ok {
+ return x.TypeParam
+ }
+ return ""
+}
+
+func (x *Type) GetType() *Type {
+ if x, ok := x.GetTypeKind().(*Type_Type); ok {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *Type) GetError() *emptypb.Empty {
+ if x, ok := x.GetTypeKind().(*Type_Error); ok {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *Type) GetAbstractType() *Type_AbstractType {
+ if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok {
+ return x.AbstractType
+ }
+ return nil
+}
+
+type isType_TypeKind interface {
+ isType_TypeKind()
+}
+
+type Type_Dyn struct {
+ // Dynamic type.
+ Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"`
+}
+
+type Type_Null struct {
+ // Null value.
+ Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Type_Primitive struct {
+ // Primitive types: `true`, `1u`, `-2.0`, `'string'`, `b'bytes'`.
+ Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=google.api.expr.v1alpha1.Type_PrimitiveType,oneof"`
+}
+
+type Type_Wrapper struct {
+ // Wrapper of a primitive type, e.g. `google.protobuf.Int64Value`.
+ Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=google.api.expr.v1alpha1.Type_PrimitiveType,oneof"`
+}
+
+type Type_WellKnown struct {
+ // Well-known protobuf type such as `google.protobuf.Timestamp`.
+ WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=google.api.expr.v1alpha1.Type_WellKnownType,oneof"`
+}
+
+type Type_ListType_ struct {
+ // Parameterized list with elements of `list_type`, e.g. `list<timestamp>`.
+ ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"`
+}
+
+type Type_MapType_ struct {
+ // Parameterized map with typed keys and values.
+ MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"`
+}
+
+type Type_Function struct {
+ // Function type.
+ Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"`
+}
+
+type Type_MessageType struct {
+ // Protocol buffer message type.
+ //
+ // The `message_type` string specifies the qualified message type name. For
+ // example, `google.plus.Profile`.
+ MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"`
+}
+
+type Type_TypeParam struct {
+ // Type param type.
+ //
+ // The `type_param` string specifies the type parameter name, e.g. `list<E>`
+ // would be a `list_type` whose element type was a `type_param` type
+ // named `E`.
+ TypeParam string `protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"`
+}
+
+type Type_Type struct {
+ // Type type.
+ //
+ // The `type` value specifies the target type, e.g. `int` is a type with a
+ // target type of `Primitive.INT`.
+ Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"`
+}
+
+type Type_Error struct {
+ // Error type.
+ //
+ // During type-checking if an expression is an error, its type is propagated
+ // as the `ERROR` type. This permits the type-checker to discover other
+ // errors present in the expression.
+ Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"`
+}
+
+type Type_AbstractType_ struct {
+ // Abstract, application defined type.
+ AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"`
+}
+
+func (*Type_Dyn) isType_TypeKind() {}
+
+func (*Type_Null) isType_TypeKind() {}
+
+func (*Type_Primitive) isType_TypeKind() {}
+
+func (*Type_Wrapper) isType_TypeKind() {}
+
+func (*Type_WellKnown) isType_TypeKind() {}
+
+func (*Type_ListType_) isType_TypeKind() {}
+
+func (*Type_MapType_) isType_TypeKind() {}
+
+func (*Type_Function) isType_TypeKind() {}
+
+func (*Type_MessageType) isType_TypeKind() {}
+
+func (*Type_TypeParam) isType_TypeKind() {}
+
+func (*Type_Type) isType_TypeKind() {}
+
+func (*Type_Error) isType_TypeKind() {}
+
+func (*Type_AbstractType_) isType_TypeKind() {}
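
A minimal sketch of how the `TypeKind` oneof is typically populated and inspected; the `kindName` helper and the sample `list<string>` type below are invented for illustration.

package main

import (
	"fmt"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// kindName renders a few of the Type oneof kinds as a readable string.
func kindName(t *exprpb.Type) string {
	switch t.GetTypeKind().(type) {
	case *exprpb.Type_Primitive:
		return fmt.Sprintf("primitive(%s)", t.GetPrimitive())
	case *exprpb.Type_ListType_:
		return fmt.Sprintf("list(%s)", kindName(t.GetListType().GetElemType()))
	case *exprpb.Type_MapType_:
		m := t.GetMapType()
		return fmt.Sprintf("map(%s, %s)", kindName(m.GetKeyType()), kindName(m.GetValueType()))
	default:
		return "other"
	}
}

func main() {
	listOfString := &exprpb.Type{
		TypeKind: &exprpb.Type_ListType_{
			ListType: &exprpb.Type_ListType{
				ElemType: &exprpb.Type{
					TypeKind: &exprpb.Type_Primitive{Primitive: exprpb.Type_STRING},
				},
			},
		},
	}
	fmt.Println(kindName(listOfString)) // prints: list(primitive(STRING))
}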
+
+// Represents a declaration of a named value or function.
+//
+// A declaration is part of the contract between the expression, the agent
+// evaluating that expression, and the caller requesting evaluation.
+type Decl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The fully qualified name of the declaration.
+ //
+ // Declarations are organized in containers and this represents the full path
+ // to the declaration in its container, as in `google.api.expr.Decl`.
+ //
+ // Declarations used as
+ // [FunctionDecl.Overload][google.api.expr.v1alpha1.Decl.FunctionDecl.Overload]
+ // parameters may or may not have a name depending on whether the overload is
+ // a function declaration or a function definition containing a result
+ // [Expr][google.api.expr.v1alpha1.Expr].
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The declaration kind.
+ //
+ // Types that are assignable to DeclKind:
+ //
+ // *Decl_Ident
+ // *Decl_Function
+ DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"`
+}
+
+func (x *Decl) Reset() {
+ *x = Decl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl) ProtoMessage() {}
+
+func (x *Decl) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl.ProtoReflect.Descriptor instead.
+func (*Decl) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Decl) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *Decl) GetDeclKind() isDecl_DeclKind {
+ if m != nil {
+ return m.DeclKind
+ }
+ return nil
+}
+
+func (x *Decl) GetIdent() *Decl_IdentDecl {
+ if x, ok := x.GetDeclKind().(*Decl_Ident); ok {
+ return x.Ident
+ }
+ return nil
+}
+
+func (x *Decl) GetFunction() *Decl_FunctionDecl {
+ if x, ok := x.GetDeclKind().(*Decl_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+type isDecl_DeclKind interface {
+ isDecl_DeclKind()
+}
+
+type Decl_Ident struct {
+ // Identifier declaration.
+ Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"`
+}
+
+type Decl_Function struct {
+ // Function declaration.
+ Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"`
+}
+
+func (*Decl_Ident) isDecl_DeclKind() {}
+
+func (*Decl_Function) isDecl_DeclKind() {}
+
+// Describes a resolved reference to a declaration.
+type Reference struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The fully qualified name of the declaration.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // For references to functions, this is a list of `Overload.overload_id`
+ // values which match according to typing rules.
+ //
+ // If the list has more than one element, overload resolution among the
+ // presented candidates must happen at runtime because of dynamic types. The
+ // type checker attempts to narrow down this list as much as possible.
+ //
+ // Empty if this is not a reference to a
+ // [Decl.FunctionDecl][google.api.expr.v1alpha1.Decl.FunctionDecl].
+ OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ // For references to constants, this may contain the value of the
+ // constant if known at compile time.
+ Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Reference) Reset() {
+ *x = Reference{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Reference) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Reference) ProtoMessage() {}
+
+func (x *Reference) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Reference.ProtoReflect.Descriptor instead.
+func (*Reference) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Reference) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Reference) GetOverloadId() []string {
+ if x != nil {
+ return x.OverloadId
+ }
+ return nil
+}
+
+func (x *Reference) GetValue() *Constant {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+// List type with typed elements, e.g. `list<string>`.
+type Type_ListType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The element type.
+ ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"`
+}
+
+func (x *Type_ListType) Reset() {
+ *x = Type_ListType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_ListType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_ListType) ProtoMessage() {}
+
+func (x *Type_ListType) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead.
+func (*Type_ListType) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Type_ListType) GetElemType() *Type {
+ if x != nil {
+ return x.ElemType
+ }
+ return nil
+}
+
+// Map type with parameterized key and value types, e.g. `map<string, int>`.
+type Type_MapType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The type of the key.
+ KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"`
+ // The type of the value.
+ ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"`
+}
+
+func (x *Type_MapType) Reset() {
+ *x = Type_MapType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_MapType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_MapType) ProtoMessage() {}
+
+func (x *Type_MapType) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead.
+func (*Type_MapType) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Type_MapType) GetKeyType() *Type {
+ if x != nil {
+ return x.KeyType
+ }
+ return nil
+}
+
+func (x *Type_MapType) GetValueType() *Type {
+ if x != nil {
+ return x.ValueType
+ }
+ return nil
+}
+
+// Function type with result and arg types.
+type Type_FunctionType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Result type of the function.
+ ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ // Argument types of the function.
+ ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"`
+}
+
+func (x *Type_FunctionType) Reset() {
+ *x = Type_FunctionType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_FunctionType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_FunctionType) ProtoMessage() {}
+
+func (x *Type_FunctionType) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead.
+func (*Type_FunctionType) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Type_FunctionType) GetResultType() *Type {
+ if x != nil {
+ return x.ResultType
+ }
+ return nil
+}
+
+func (x *Type_FunctionType) GetArgTypes() []*Type {
+ if x != nil {
+ return x.ArgTypes
+ }
+ return nil
+}
+
+// Application defined abstract type.
+type Type_AbstractType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The fully qualified name of this abstract type.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Parameter types for this abstract type.
+ ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"`
+}
+
+func (x *Type_AbstractType) Reset() {
+ *x = Type_AbstractType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_AbstractType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_AbstractType) ProtoMessage() {}
+
+func (x *Type_AbstractType) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead.
+func (*Type_AbstractType) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Type_AbstractType) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Type_AbstractType) GetParameterTypes() []*Type {
+ if x != nil {
+ return x.ParameterTypes
+ }
+ return nil
+}
+
+// Identifier declaration which specifies its type and optional `Expr` value.
+//
+// An identifier without a value is a declaration that must be provided at
+// evaluation time. An identifier with a value should resolve to a constant,
+// but may be used in conjunction with other identifiers bound at evaluation
+// time.
+type Decl_IdentDecl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The type of the identifier.
+ Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // The constant value of the identifier. If not specified, the identifier
+ // must be supplied at evaluation time.
+ Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ // Documentation string for the identifier.
+ Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_IdentDecl) Reset() {
+ *x = Decl_IdentDecl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_IdentDecl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_IdentDecl) ProtoMessage() {}
+
+func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead.
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *Decl_IdentDecl) GetType() *Type {
+ if x != nil {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *Decl_IdentDecl) GetValue() *Constant {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Decl_IdentDecl) GetDoc() string {
+ if x != nil {
+ return x.Doc
+ }
+ return ""
+}
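
A minimal sketch of an identifier declaration that carries a type, a compile-time constant value, and a doc string; the name `max_retries` and its value are invented, and the sketch assumes the sibling `Constant` type generated from syntax.proto in the same package.

package main

import (
	"fmt"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

func main() {
	ident := &exprpb.Decl{
		Name: "max_retries",
		DeclKind: &exprpb.Decl_Ident{
			Ident: &exprpb.Decl_IdentDecl{
				// Required type of the identifier.
				Type: &exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: exprpb.Type_INT64}},
				// Optional constant value; without it the value must be
				// supplied at evaluation time.
				Value: &exprpb.Constant{ConstantKind: &exprpb.Constant_Int64Value{Int64Value: 5}},
				Doc:   "maximum number of retries",
			},
		},
	}
	fmt.Println(ident.GetIdent().GetValue().GetInt64Value()) // prints: 5
}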
+
+// Function declaration specifies one or more overloads which indicate the
+// function's parameter types and return type.
+//
+// Functions have no observable side-effects (there may be side-effects like
+// logging which are not observable from CEL).
+type Decl_FunctionDecl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. List of function overloads, must contain at least one overload.
+ Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"`
+}
+
+func (x *Decl_FunctionDecl) Reset() {
+ *x = Decl_FunctionDecl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_FunctionDecl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload {
+ if x != nil {
+ return x.Overloads
+ }
+ return nil
+}
+
+// An overload indicates a function's parameter types and return type, and
+// may optionally include a function body described in terms of
+// [Expr][google.api.expr.v1alpha1.Expr] values.
+//
+// Function overloads are declared in either a function or method
+// call-style. For methods, the `params[0]` is the expected type of the
+// target receiver.
+//
+// Overloads must have non-overlapping argument types after erasure of all
+// parameterized type variables (similar to type erasure in Java).
+type Decl_FunctionDecl_Overload struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Globally unique overload name of the function which reflects
+ // the function name and argument types.
+ //
+ // This will be used by a [Reference][google.api.expr.v1alpha1.Reference]
+ // to indicate the `overload_id` that was resolved for the function
+ // `name`.
+ OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ // List of function parameter [Type][google.api.expr.v1alpha1.Type]
+ // values.
+ //
+ // Param types are disjoint after generic type parameters have been
+ // replaced with the type `DYN`. Since the `DYN` type is compatible with
+ // any other type, this means that if `A` is a type parameter, the
+ // function types `int<A>` and `int<Timestamp>` are not disjoint. Likewise,
+ // `map<string, string>` is not disjoint from `map<K, V>`.
+ //
+ // When the `result_type` of a function is a generic type param, the
+ // type param name also appears as the `type` of at least one of the params.
+ Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"`
+ // The type param names associated with the function declaration.
+ //
+ // For example, `function ex<K,V>(K key, map<K, V> map) : V` would yield
+ // the type params of `K, V`.
+ TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"`
+ // Required. The result type of the function. For example, the operator
+ // `string.isEmpty()` would have `result_type` of `kind: BOOL`.
+ ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ // Whether the function is to be used in a method call-style `x.f(...)`
+ // or a function call-style `f(x, ...)`.
+ //
+ // For methods, the first parameter declaration, `params[0]` is the
+ // expected type of the target receiver.
+ IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"`
+ // Documentation string for the overload.
+ Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_FunctionDecl_Overload) Reset() {
+ *x = Decl_FunctionDecl_Overload{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_FunctionDecl_Overload) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl_Overload) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_checked_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP(), []int{2, 1, 0}
+}
+
+func (x *Decl_FunctionDecl_Overload) GetOverloadId() string {
+ if x != nil {
+ return x.OverloadId
+ }
+ return ""
+}
+
+func (x *Decl_FunctionDecl_Overload) GetParams() []*Type {
+ if x != nil {
+ return x.Params
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string {
+ if x != nil {
+ return x.TypeParams
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetResultType() *Type {
+ if x != nil {
+ return x.ResultType
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool {
+ if x != nil {
+ return x.IsInstanceFunction
+ }
+ return false
+}
+
+func (x *Decl_FunctionDecl_Overload) GetDoc() string {
+ if x != nil {
+ return x.Doc
+ }
+ return ""
+}
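
A minimal sketch of a function declaration with a single method-style overload; the function name `isEmpty` and the overload id are invented for the example.

package main

import (
	"fmt"

	exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

func main() {
	boolType := &exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: exprpb.Type_BOOL}}
	stringType := &exprpb.Type{TypeKind: &exprpb.Type_Primitive{Primitive: exprpb.Type_STRING}}

	decl := &exprpb.Decl{
		Name: "isEmpty",
		DeclKind: &exprpb.Decl_Function{
			Function: &exprpb.Decl_FunctionDecl{
				Overloads: []*exprpb.Decl_FunctionDecl_Overload{{
					OverloadId: "string_is_empty",
					// For instance (method-style) functions, params[0] is the
					// expected type of the target receiver.
					Params:             []*exprpb.Type{stringType},
					ResultType:         boolType,
					IsInstanceFunction: true,
				}},
			},
		},
	}
	fmt.Println(decl.GetFunction().GetOverloads()[0].GetOverloadId()) // prints: string_is_empty
}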
+
+var File_google_api_expr_v1alpha1_checked_proto protoreflect.FileDescriptor
+
+var file_google_api_expr_v1alpha1_checked_proto_rawDesc = []byte{
+ 0x0a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70,
+ 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b,
+ 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x31, 0x1a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65,
+ 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e,
+ 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9a, 0x04, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64,
+ 0x45, 0x78, 0x70, 0x72, 0x12, 0x5c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
+ 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31,
+ 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78,
+ 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d,
+ 0x61, 0x70, 0x12, 0x4d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61,
+ 0x70, 0x12, 0x45, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
+ 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72,
+ 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x04, 0x65,
+ 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c,
+ 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a,
+ 0x64, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
+ 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
+ 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0xc8, 0x0b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48,
+ 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x4c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31,
+ 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69,
+ 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x48, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
+ 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72,
+ 0x12, 0x4d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
+ 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12,
+ 0x46, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c,
+ 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c,
+ 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70,
+ 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x49, 0x0a, 0x08,
+ 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
+ 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
+ 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x34, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31,
+ 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x12, 0x52, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61,
+ 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x47, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
+ 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65,
+ 0x1a, 0x83, 0x01, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08,
+ 0x6b, 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07,
+ 0x6b, 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31,
+ 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x8c, 0x01, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c,
+ 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76,
+ 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65,
+ 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31,
+ 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67,
+ 0x54, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x6b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63,
+ 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0f, 0x70, 0x61, 0x72,
+ 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70,
+ 0x65, 0x73, 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45,
+ 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a,
+ 0x05, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54,
+ 0x36, 0x34, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04,
+ 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05,
+ 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b,
+ 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c,
+ 0x5f, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50,
+ 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59,
+ 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10,
+ 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42,
+ 0x0b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xb3, 0x05, 0x0a,
+ 0x04, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x05, 0x69, 0x64, 0x65,
+ 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
+ 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65,
+ 0x63, 0x6c, 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x08, 0x66,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75,
+ 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75,
+ 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x8b, 0x01, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74,
+ 0x44, 0x65, 0x63, 0x6c, 0x12, 0x32, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x03, 0x64, 0x6f, 0x63, 0x1a, 0xee, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x52, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61,
+ 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
+ 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09,
+ 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0x89, 0x02, 0x0a, 0x08, 0x4f, 0x76,
+ 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f,
+ 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65,
+ 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x31, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12,
+ 0x1f, 0x0a, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
+ 0x12, 0x3f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
+ 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65,
+ 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x12, 0x69, 0x73, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x03, 0x64, 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69,
+ 0x6e, 0x64, 0x22, 0x7a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
+ 0x69, 0x64, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f,
+ 0x61, 0x64, 0x49, 0x64, 0x12, 0x38, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43,
+ 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6c,
+ 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x09,
+ 0x44, 0x65, 0x63, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67,
+ 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c,
+ 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_api_expr_v1alpha1_checked_proto_rawDescOnce sync.Once
+ file_google_api_expr_v1alpha1_checked_proto_rawDescData = file_google_api_expr_v1alpha1_checked_proto_rawDesc
+)
+
+func file_google_api_expr_v1alpha1_checked_proto_rawDescGZIP() []byte {
+ file_google_api_expr_v1alpha1_checked_proto_rawDescOnce.Do(func() {
+ file_google_api_expr_v1alpha1_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_checked_proto_rawDescData)
+ })
+ return file_google_api_expr_v1alpha1_checked_proto_rawDescData
+}
+
+var file_google_api_expr_v1alpha1_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_google_api_expr_v1alpha1_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
+var file_google_api_expr_v1alpha1_checked_proto_goTypes = []interface{}{
+ (Type_PrimitiveType)(0), // 0: google.api.expr.v1alpha1.Type.PrimitiveType
+ (Type_WellKnownType)(0), // 1: google.api.expr.v1alpha1.Type.WellKnownType
+ (*CheckedExpr)(nil), // 2: google.api.expr.v1alpha1.CheckedExpr
+ (*Type)(nil), // 3: google.api.expr.v1alpha1.Type
+ (*Decl)(nil), // 4: google.api.expr.v1alpha1.Decl
+ (*Reference)(nil), // 5: google.api.expr.v1alpha1.Reference
+ nil, // 6: google.api.expr.v1alpha1.CheckedExpr.ReferenceMapEntry
+ nil, // 7: google.api.expr.v1alpha1.CheckedExpr.TypeMapEntry
+ (*Type_ListType)(nil), // 8: google.api.expr.v1alpha1.Type.ListType
+ (*Type_MapType)(nil), // 9: google.api.expr.v1alpha1.Type.MapType
+ (*Type_FunctionType)(nil), // 10: google.api.expr.v1alpha1.Type.FunctionType
+ (*Type_AbstractType)(nil), // 11: google.api.expr.v1alpha1.Type.AbstractType
+ (*Decl_IdentDecl)(nil), // 12: google.api.expr.v1alpha1.Decl.IdentDecl
+ (*Decl_FunctionDecl)(nil), // 13: google.api.expr.v1alpha1.Decl.FunctionDecl
+ (*Decl_FunctionDecl_Overload)(nil), // 14: google.api.expr.v1alpha1.Decl.FunctionDecl.Overload
+ (*SourceInfo)(nil), // 15: google.api.expr.v1alpha1.SourceInfo
+ (*Expr)(nil), // 16: google.api.expr.v1alpha1.Expr
+ (*emptypb.Empty)(nil), // 17: google.protobuf.Empty
+ (structpb.NullValue)(0), // 18: google.protobuf.NullValue
+ (*Constant)(nil), // 19: google.api.expr.v1alpha1.Constant
+}
+var file_google_api_expr_v1alpha1_checked_proto_depIdxs = []int32{
+ 6, // 0: google.api.expr.v1alpha1.CheckedExpr.reference_map:type_name -> google.api.expr.v1alpha1.CheckedExpr.ReferenceMapEntry
+ 7, // 1: google.api.expr.v1alpha1.CheckedExpr.type_map:type_name -> google.api.expr.v1alpha1.CheckedExpr.TypeMapEntry
+ 15, // 2: google.api.expr.v1alpha1.CheckedExpr.source_info:type_name -> google.api.expr.v1alpha1.SourceInfo
+ 16, // 3: google.api.expr.v1alpha1.CheckedExpr.expr:type_name -> google.api.expr.v1alpha1.Expr
+ 17, // 4: google.api.expr.v1alpha1.Type.dyn:type_name -> google.protobuf.Empty
+ 18, // 5: google.api.expr.v1alpha1.Type.null:type_name -> google.protobuf.NullValue
+ 0, // 6: google.api.expr.v1alpha1.Type.primitive:type_name -> google.api.expr.v1alpha1.Type.PrimitiveType
+ 0, // 7: google.api.expr.v1alpha1.Type.wrapper:type_name -> google.api.expr.v1alpha1.Type.PrimitiveType
+ 1, // 8: google.api.expr.v1alpha1.Type.well_known:type_name -> google.api.expr.v1alpha1.Type.WellKnownType
+ 8, // 9: google.api.expr.v1alpha1.Type.list_type:type_name -> google.api.expr.v1alpha1.Type.ListType
+ 9, // 10: google.api.expr.v1alpha1.Type.map_type:type_name -> google.api.expr.v1alpha1.Type.MapType
+ 10, // 11: google.api.expr.v1alpha1.Type.function:type_name -> google.api.expr.v1alpha1.Type.FunctionType
+ 3, // 12: google.api.expr.v1alpha1.Type.type:type_name -> google.api.expr.v1alpha1.Type
+ 17, // 13: google.api.expr.v1alpha1.Type.error:type_name -> google.protobuf.Empty
+ 11, // 14: google.api.expr.v1alpha1.Type.abstract_type:type_name -> google.api.expr.v1alpha1.Type.AbstractType
+ 12, // 15: google.api.expr.v1alpha1.Decl.ident:type_name -> google.api.expr.v1alpha1.Decl.IdentDecl
+ 13, // 16: google.api.expr.v1alpha1.Decl.function:type_name -> google.api.expr.v1alpha1.Decl.FunctionDecl
+ 19, // 17: google.api.expr.v1alpha1.Reference.value:type_name -> google.api.expr.v1alpha1.Constant
+ 5, // 18: google.api.expr.v1alpha1.CheckedExpr.ReferenceMapEntry.value:type_name -> google.api.expr.v1alpha1.Reference
+ 3, // 19: google.api.expr.v1alpha1.CheckedExpr.TypeMapEntry.value:type_name -> google.api.expr.v1alpha1.Type
+ 3, // 20: google.api.expr.v1alpha1.Type.ListType.elem_type:type_name -> google.api.expr.v1alpha1.Type
+ 3, // 21: google.api.expr.v1alpha1.Type.MapType.key_type:type_name -> google.api.expr.v1alpha1.Type
+ 3, // 22: google.api.expr.v1alpha1.Type.MapType.value_type:type_name -> google.api.expr.v1alpha1.Type
+ 3, // 23: google.api.expr.v1alpha1.Type.FunctionType.result_type:type_name -> google.api.expr.v1alpha1.Type
+ 3, // 24: google.api.expr.v1alpha1.Type.FunctionType.arg_types:type_name -> google.api.expr.v1alpha1.Type
+ 3, // 25: google.api.expr.v1alpha1.Type.AbstractType.parameter_types:type_name -> google.api.expr.v1alpha1.Type
+ 3, // 26: google.api.expr.v1alpha1.Decl.IdentDecl.type:type_name -> google.api.expr.v1alpha1.Type
+ 19, // 27: google.api.expr.v1alpha1.Decl.IdentDecl.value:type_name -> google.api.expr.v1alpha1.Constant
+ 14, // 28: google.api.expr.v1alpha1.Decl.FunctionDecl.overloads:type_name -> google.api.expr.v1alpha1.Decl.FunctionDecl.Overload
+ 3, // 29: google.api.expr.v1alpha1.Decl.FunctionDecl.Overload.params:type_name -> google.api.expr.v1alpha1.Type
+ 3, // 30: google.api.expr.v1alpha1.Decl.FunctionDecl.Overload.result_type:type_name -> google.api.expr.v1alpha1.Type
+ 31, // [31:31] is the sub-list for method output_type
+ 31, // [31:31] is the sub-list for method input_type
+ 31, // [31:31] is the sub-list for extension type_name
+ 31, // [31:31] is the sub-list for extension extendee
+ 0, // [0:31] is the sub-list for field type_name
+}
+
+func init() { file_google_api_expr_v1alpha1_checked_proto_init() }
+func file_google_api_expr_v1alpha1_checked_proto_init() {
+ if File_google_api_expr_v1alpha1_checked_proto != nil {
+ return
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_google_api_expr_v1alpha1_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CheckedExpr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Reference); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_ListType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_MapType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_FunctionType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_AbstractType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_IdentDecl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_FunctionDecl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_FunctionDecl_Overload); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_google_api_expr_v1alpha1_checked_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Type_Dyn)(nil),
+ (*Type_Null)(nil),
+ (*Type_Primitive)(nil),
+ (*Type_Wrapper)(nil),
+ (*Type_WellKnown)(nil),
+ (*Type_ListType_)(nil),
+ (*Type_MapType_)(nil),
+ (*Type_Function)(nil),
+ (*Type_MessageType)(nil),
+ (*Type_TypeParam)(nil),
+ (*Type_Type)(nil),
+ (*Type_Error)(nil),
+ (*Type_AbstractType_)(nil),
+ }
+ file_google_api_expr_v1alpha1_checked_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Decl_Ident)(nil),
+ (*Decl_Function)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_expr_v1alpha1_checked_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 13,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_expr_v1alpha1_checked_proto_goTypes,
+ DependencyIndexes: file_google_api_expr_v1alpha1_checked_proto_depIdxs,
+ EnumInfos: file_google_api_expr_v1alpha1_checked_proto_enumTypes,
+ MessageInfos: file_google_api_expr_v1alpha1_checked_proto_msgTypes,
+ }.Build()
+ File_google_api_expr_v1alpha1_checked_proto = out.File
+ file_google_api_expr_v1alpha1_checked_proto_rawDesc = nil
+ file_google_api_expr_v1alpha1_checked_proto_goTypes = nil
+ file_google_api_expr_v1alpha1_checked_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go
new file mode 100644
index 000000000..0a2ffb595
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go
@@ -0,0 +1,580 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v4.24.4
+// source: google/api/expr/v1alpha1/eval.proto
+
+package expr
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The state of an evaluation.
+//
+// Can represent an initial, partial, or completed state of evaluation.
+type EvalState struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The unique values referenced in this message.
+ Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ // An ordered list of results.
+ //
+ // Tracks the flow of evaluation through the expression.
+ // May be sparse.
+ Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
+}
+
+func (x *EvalState) Reset() {
+ *x = EvalState{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EvalState) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState) ProtoMessage() {}
+
+func (x *EvalState) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState.ProtoReflect.Descriptor instead.
+func (*EvalState) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *EvalState) GetValues() []*ExprValue {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *EvalState) GetResults() []*EvalState_Result {
+ if x != nil {
+ return x.Results
+ }
+ return nil
+}
+
+// The value of an evaluated expression.
+type ExprValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An expression can resolve to a value, error or unknown.
+ //
+ // Types that are assignable to Kind:
+ //
+ // *ExprValue_Value
+ // *ExprValue_Error
+ // *ExprValue_Unknown
+ Kind isExprValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (x *ExprValue) Reset() {
+ *x = ExprValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExprValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExprValue) ProtoMessage() {}
+
+func (x *ExprValue) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead.
+func (*ExprValue) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *ExprValue) GetKind() isExprValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (x *ExprValue) GetValue() *Value {
+ if x, ok := x.GetKind().(*ExprValue_Value); ok {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *ExprValue) GetError() *ErrorSet {
+ if x, ok := x.GetKind().(*ExprValue_Error); ok {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *ExprValue) GetUnknown() *UnknownSet {
+ if x, ok := x.GetKind().(*ExprValue_Unknown); ok {
+ return x.Unknown
+ }
+ return nil
+}
+
+type isExprValue_Kind interface {
+ isExprValue_Kind()
+}
+
+type ExprValue_Value struct {
+ // A concrete value.
+ Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"`
+}
+
+type ExprValue_Error struct {
+	// The set of errors in the critical path of evaluation.
+	//
+	// Only errors in the critical path are included. For example,
+	// `(<error1> || true) && <error2>` will only result in `<error2>`,
+	// while `<error1> || <error2>` will result in both `<error1>` and
+	// `<error2>`.
+	//
+	// Errors caused by the presence of other errors are not included in the
+	// set. For example `<error1>.foo`, `foo(<error1>)`, and `<error1> + 1` will
+	// only result in `<error1>`.
+	//
+	// Multiple errors *might* be included when evaluation could result
+	// in different errors. For example `<error1> + <error2>` and
+	// `foo(<error1>, <error2>)` may result in `<error1>`, `<error2>` or both.
+	// The exact subset of errors included for this case is unspecified and
+	// depends on the implementation details of the evaluator.
+ Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
+}
+
+type ExprValue_Unknown struct {
+	// The set of unknowns in the critical path of evaluation.
+	//
+	// Unknown behaves identically to Error with regard to propagation.
+	// Specifically, only unknowns in the critical path are included, unknowns
+	// caused by the presence of other unknowns are not included, and multiple
+	// unknowns *might* be included when evaluation could result in
+	// different unknowns. For example:
+	//
+	//	(<unknown[1]> || true) && <unknown[2]> -> <unknown[2]>
+	//	<unknown[1]> || <unknown[2]> -> <unknown[1,2]>
+	//	<unknown[1]>.foo -> <unknown[1]>
+	//	foo(<unknown[1]>) -> <unknown[1]>
+	//	<unknown[1]> + <error> -> <unknown[1]> or <error>
+	//
+	// Unknown takes precedence over Error in cases where a `Value` can short
+	// circuit the result:
+	//
+	//	<error> || <unknown> -> <unknown>
+	//	<error> && <unknown> -> <unknown>
+	//
+	// Errors take precedence in all other cases:
+	//
+	//	<error> + <unknown> -> <error>
+	//	foo(<error>, <unknown>) -> <error>
+ Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"`
+}
+
+func (*ExprValue_Value) isExprValue_Kind() {}
+
+func (*ExprValue_Error) isExprValue_Kind() {}
+
+func (*ExprValue_Unknown) isExprValue_Kind() {}
+
+// A set of errors.
+//
+// The errors included depend on the context. See `ExprValue.error`.
+type ErrorSet struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The errors in the set.
+ Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"`
+}
+
+func (x *ErrorSet) Reset() {
+ *x = ErrorSet{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ErrorSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorSet) ProtoMessage() {}
+
+func (x *ErrorSet) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead.
+func (*ErrorSet) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ErrorSet) GetErrors() []*status.Status {
+ if x != nil {
+ return x.Errors
+ }
+ return nil
+}
+
+// A set of expressions for which the value is unknown.
+//
+// The unknowns included depend on the context. See `ExprValue.unknown`.
+type UnknownSet struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The ids of the expressions with unknown values.
+ Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"`
+}
+
+func (x *UnknownSet) Reset() {
+ *x = UnknownSet{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UnknownSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UnknownSet) ProtoMessage() {}
+
+func (x *UnknownSet) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead.
+func (*UnknownSet) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *UnknownSet) GetExprs() []int64 {
+ if x != nil {
+ return x.Exprs
+ }
+ return nil
+}
+
+// A single evaluation result.
+type EvalState_Result struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+	// The id of the expression this result is for.
+ Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"`
+ // The index in `values` of the resulting value.
+ Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *EvalState_Result) Reset() {
+ *x = EvalState_Result{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EvalState_Result) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState_Result) ProtoMessage() {}
+
+func (x *EvalState_Result) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_eval_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead.
+func (*EvalState_Result) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *EvalState_Result) GetExpr() int64 {
+ if x != nil {
+ return x.Expr
+ }
+ return 0
+}
+
+func (x *EvalState_Result) GetValue() int64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+var File_google_api_expr_v1alpha1_eval_proto protoreflect.FileDescriptor
+
+var file_google_api_expr_v1alpha1_eval_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70,
+ 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x1a,
+ 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72,
+ 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70,
+ 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc2,
+ 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x06,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76,
+ 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x07, 0x72, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61,
+ 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e,
+ 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a,
+ 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70,
+ 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x22, 0xca, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x37, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c,
+ 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52,
+ 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x40, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x31, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52,
+ 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64,
+ 0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e,
+ 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x6c, 0x0a, 0x1c,
+ 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65,
+ 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x09, 0x45, 0x76,
+ 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_google_api_expr_v1alpha1_eval_proto_rawDescOnce sync.Once
+ file_google_api_expr_v1alpha1_eval_proto_rawDescData = file_google_api_expr_v1alpha1_eval_proto_rawDesc
+)
+
+func file_google_api_expr_v1alpha1_eval_proto_rawDescGZIP() []byte {
+ file_google_api_expr_v1alpha1_eval_proto_rawDescOnce.Do(func() {
+ file_google_api_expr_v1alpha1_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_eval_proto_rawDescData)
+ })
+ return file_google_api_expr_v1alpha1_eval_proto_rawDescData
+}
+
+var file_google_api_expr_v1alpha1_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_google_api_expr_v1alpha1_eval_proto_goTypes = []interface{}{
+ (*EvalState)(nil), // 0: google.api.expr.v1alpha1.EvalState
+ (*ExprValue)(nil), // 1: google.api.expr.v1alpha1.ExprValue
+ (*ErrorSet)(nil), // 2: google.api.expr.v1alpha1.ErrorSet
+ (*UnknownSet)(nil), // 3: google.api.expr.v1alpha1.UnknownSet
+ (*EvalState_Result)(nil), // 4: google.api.expr.v1alpha1.EvalState.Result
+ (*Value)(nil), // 5: google.api.expr.v1alpha1.Value
+ (*status.Status)(nil), // 6: google.rpc.Status
+}
+var file_google_api_expr_v1alpha1_eval_proto_depIdxs = []int32{
+ 1, // 0: google.api.expr.v1alpha1.EvalState.values:type_name -> google.api.expr.v1alpha1.ExprValue
+ 4, // 1: google.api.expr.v1alpha1.EvalState.results:type_name -> google.api.expr.v1alpha1.EvalState.Result
+ 5, // 2: google.api.expr.v1alpha1.ExprValue.value:type_name -> google.api.expr.v1alpha1.Value
+ 2, // 3: google.api.expr.v1alpha1.ExprValue.error:type_name -> google.api.expr.v1alpha1.ErrorSet
+ 3, // 4: google.api.expr.v1alpha1.ExprValue.unknown:type_name -> google.api.expr.v1alpha1.UnknownSet
+ 6, // 5: google.api.expr.v1alpha1.ErrorSet.errors:type_name -> google.rpc.Status
+ 6, // [6:6] is the sub-list for method output_type
+ 6, // [6:6] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_google_api_expr_v1alpha1_eval_proto_init() }
+func file_google_api_expr_v1alpha1_eval_proto_init() {
+ if File_google_api_expr_v1alpha1_eval_proto != nil {
+ return
+ }
+ file_google_api_expr_v1alpha1_value_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_google_api_expr_v1alpha1_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EvalState); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExprValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ErrorSet); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UnknownSet); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EvalState_Result); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_google_api_expr_v1alpha1_eval_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*ExprValue_Value)(nil),
+ (*ExprValue_Error)(nil),
+ (*ExprValue_Unknown)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_expr_v1alpha1_eval_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_expr_v1alpha1_eval_proto_goTypes,
+ DependencyIndexes: file_google_api_expr_v1alpha1_eval_proto_depIdxs,
+ MessageInfos: file_google_api_expr_v1alpha1_eval_proto_msgTypes,
+ }.Build()
+ File_google_api_expr_v1alpha1_eval_proto = out.File
+ file_google_api_expr_v1alpha1_eval_proto_rawDesc = nil
+ file_google_api_expr_v1alpha1_eval_proto_goTypes = nil
+ file_google_api_expr_v1alpha1_eval_proto_depIdxs = nil
+}
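
The eval types above form a small indexing scheme: each `EvalState_Result` pairs an expression id with an index into `EvalState.Values`, and every referenced `ExprValue` carries exactly one of a concrete value, an `ErrorSet`, or an `UnknownSet`. The sketch below is illustrative only (it is not part of the vendored file, and the sample data is made up); it shows how a consumer of this package might walk an `EvalState` and switch on the result kind.

package main

import (
	"fmt"

	expr "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
	status "google.golang.org/genproto/googleapis/rpc/status"
)

// describe walks an EvalState: Result.Value is an index into state.Values,
// and each referenced ExprValue is one of value, error, or unknown.
func describe(state *expr.EvalState) {
	for _, r := range state.GetResults() {
		v := state.GetValues()[r.GetValue()]
		switch k := v.GetKind().(type) {
		case *expr.ExprValue_Value:
			fmt.Printf("expr %d -> value %v\n", r.GetExpr(), k.Value)
		case *expr.ExprValue_Error:
			fmt.Printf("expr %d -> errors %v\n", r.GetExpr(), k.Error.GetErrors())
		case *expr.ExprValue_Unknown:
			fmt.Printf("expr %d -> unknown exprs %v\n", r.GetExpr(), k.Unknown.GetExprs())
		}
	}
}

func main() {
	// Hypothetical sample: expression id 7 failed with a single error.
	state := &expr.EvalState{
		Values: []*expr.ExprValue{{
			Kind: &expr.ExprValue_Error{Error: &expr.ErrorSet{
				Errors: []*status.Status{{Message: "division by zero"}},
			}},
		}},
		Results: []*expr.EvalState_Result{{Expr: 7, Value: 0}},
	}
	describe(state)
}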
diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go
new file mode 100644
index 000000000..57aaa2c9f
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go
@@ -0,0 +1,275 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v4.24.4
+// source: google/api/expr/v1alpha1/explain.proto
+
+package expr
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Values of intermediate expressions produced when evaluating expression.
+// Deprecated; use `EvalState` instead.
+//
+// Deprecated: Do not use.
+type Explain struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // All of the observed values.
+ //
+ // The field value_index is an index in the values list.
+ // Separating values from steps is needed to remove redundant values.
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ // List of steps.
+ //
+ // Repeated evaluations of the same expression generate new ExprStep
+ // instances. The order of such ExprStep instances matches the order of
+ // elements returned by Comprehension.iter_range.
+ ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
+}
+
+func (x *Explain) Reset() {
+ *x = Explain{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_explain_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Explain) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain) ProtoMessage() {}
+
+func (x *Explain) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_explain_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain.ProtoReflect.Descriptor instead.
+func (*Explain) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_explain_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Explain) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *Explain) GetExprSteps() []*Explain_ExprStep {
+ if x != nil {
+ return x.ExprSteps
+ }
+ return nil
+}
+
+// ID and value index of one step.
+type Explain_ExprStep struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // ID of corresponding Expr node.
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ // Index of the value in the values list.
+ ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"`
+}
+
+func (x *Explain_ExprStep) Reset() {
+ *x = Explain_ExprStep{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_explain_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Explain_ExprStep) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain_ExprStep) ProtoMessage() {}
+
+func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_explain_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead.
+func (*Explain_ExprStep) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_explain_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Explain_ExprStep) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *Explain_ExprStep) GetValueIndex() int32 {
+ if x != nil {
+ return x.ValueIndex
+ }
+ return 0
+}
+
+var File_google_api_expr_v1alpha1_explain_proto protoreflect.FileDescriptor
+
+var file_google_api_expr_v1alpha1_explain_proto_rawDesc = []byte{
+ 0x0a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70,
+ 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x31, 0x1a, 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65,
+ 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xce, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x37, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x49, 0x0a,
+ 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65,
+ 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65,
+ 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72,
+ 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e,
+ 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x6f, 0x0a, 0x1c, 0x63, 0x6f, 0x6d,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_google_api_expr_v1alpha1_explain_proto_rawDescOnce sync.Once
+ file_google_api_expr_v1alpha1_explain_proto_rawDescData = file_google_api_expr_v1alpha1_explain_proto_rawDesc
+)
+
+func file_google_api_expr_v1alpha1_explain_proto_rawDescGZIP() []byte {
+ file_google_api_expr_v1alpha1_explain_proto_rawDescOnce.Do(func() {
+ file_google_api_expr_v1alpha1_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_explain_proto_rawDescData)
+ })
+ return file_google_api_expr_v1alpha1_explain_proto_rawDescData
+}
+
+var file_google_api_expr_v1alpha1_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_google_api_expr_v1alpha1_explain_proto_goTypes = []interface{}{
+ (*Explain)(nil), // 0: google.api.expr.v1alpha1.Explain
+ (*Explain_ExprStep)(nil), // 1: google.api.expr.v1alpha1.Explain.ExprStep
+ (*Value)(nil), // 2: google.api.expr.v1alpha1.Value
+}
+var file_google_api_expr_v1alpha1_explain_proto_depIdxs = []int32{
+ 2, // 0: google.api.expr.v1alpha1.Explain.values:type_name -> google.api.expr.v1alpha1.Value
+ 1, // 1: google.api.expr.v1alpha1.Explain.expr_steps:type_name -> google.api.expr.v1alpha1.Explain.ExprStep
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_google_api_expr_v1alpha1_explain_proto_init() }
+func file_google_api_expr_v1alpha1_explain_proto_init() {
+ if File_google_api_expr_v1alpha1_explain_proto != nil {
+ return
+ }
+ file_google_api_expr_v1alpha1_value_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_google_api_expr_v1alpha1_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain_ExprStep); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_expr_v1alpha1_explain_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_expr_v1alpha1_explain_proto_goTypes,
+ DependencyIndexes: file_google_api_expr_v1alpha1_explain_proto_depIdxs,
+ MessageInfos: file_google_api_expr_v1alpha1_explain_proto_msgTypes,
+ }.Build()
+ File_google_api_expr_v1alpha1_explain_proto = out.File
+ file_google_api_expr_v1alpha1_explain_proto_rawDesc = nil
+ file_google_api_expr_v1alpha1_explain_proto_goTypes = nil
+ file_google_api_expr_v1alpha1_explain_proto_depIdxs = nil
+}
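
`Explain` is deprecated in favour of `EvalState`, but the indexing it documents is the same: each `ExprStep` records an expression id and a `value_index` into the shared `Values` slice. A minimal sketch of resolving a step back to its observed value follows (illustrative only, not part of the vendored file; the package name is hypothetical).

package exprutil

import (
	expr "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// stepValue resolves an ExprStep back to the observed value it points at.
// Explain is deprecated upstream; EvalState uses the same values/index shape.
func stepValue(e *expr.Explain, step *expr.Explain_ExprStep) *expr.Value {
	return e.GetValues()[step.GetValueIndex()]
}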
diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go
new file mode 100644
index 000000000..c90c6015d
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go
@@ -0,0 +1,2040 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v4.24.4
+// source: google/api/expr/v1alpha1/syntax.proto
+
+package expr
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// CEL component specifier.
+type SourceInfo_Extension_Component int32
+
+const (
+ // Unspecified, default.
+ SourceInfo_Extension_COMPONENT_UNSPECIFIED SourceInfo_Extension_Component = 0
+ // Parser. Converts a CEL string to an AST.
+ SourceInfo_Extension_COMPONENT_PARSER SourceInfo_Extension_Component = 1
+ // Type checker. Checks that references in an AST are defined and types
+ // agree.
+ SourceInfo_Extension_COMPONENT_TYPE_CHECKER SourceInfo_Extension_Component = 2
+ // Runtime. Evaluates a parsed and optionally checked CEL AST against a
+ // context.
+ SourceInfo_Extension_COMPONENT_RUNTIME SourceInfo_Extension_Component = 3
+)
+
+// Enum value maps for SourceInfo_Extension_Component.
+var (
+ SourceInfo_Extension_Component_name = map[int32]string{
+ 0: "COMPONENT_UNSPECIFIED",
+ 1: "COMPONENT_PARSER",
+ 2: "COMPONENT_TYPE_CHECKER",
+ 3: "COMPONENT_RUNTIME",
+ }
+ SourceInfo_Extension_Component_value = map[string]int32{
+ "COMPONENT_UNSPECIFIED": 0,
+ "COMPONENT_PARSER": 1,
+ "COMPONENT_TYPE_CHECKER": 2,
+ "COMPONENT_RUNTIME": 3,
+ }
+)
+
+func (x SourceInfo_Extension_Component) Enum() *SourceInfo_Extension_Component {
+ p := new(SourceInfo_Extension_Component)
+ *p = x
+ return p
+}
+
+func (x SourceInfo_Extension_Component) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SourceInfo_Extension_Component) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_api_expr_v1alpha1_syntax_proto_enumTypes[0].Descriptor()
+}
+
+func (SourceInfo_Extension_Component) Type() protoreflect.EnumType {
+ return &file_google_api_expr_v1alpha1_syntax_proto_enumTypes[0]
+}
+
+func (x SourceInfo_Extension_Component) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Component.Descriptor instead.
+func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{3, 0, 0}
+}
+
+// An expression together with source information as returned by the parser.
+type ParsedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The parsed expression.
+ Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"`
+ // The source info derived from input that generated the parsed `expr`.
+ SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+}
+
+func (x *ParsedExpr) Reset() {
+ *x = ParsedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ParsedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ParsedExpr) ProtoMessage() {}
+
+func (x *ParsedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead.
+func (*ParsedExpr) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ParsedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
+
+func (x *ParsedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+// An abstract representation of a common expression.
+//
+// Expressions are abstractly represented as a collection of identifiers,
+// select statements, function calls, literals, and comprehensions. All
+// operators with the exception of the '.' operator are modelled as function
+// calls. This makes it easy to represent new operators in the existing AST.
+//
+// All references within expressions must resolve to a
+// [Decl][google.api.expr.v1alpha1.Decl] provided at type-check for an
+// expression to be valid. A reference may either be a bare identifier `name` or
+// a qualified identifier `google.api.name`. References may either refer to a
+// value or a function declaration.
+//
+// For example, the expression `google.api.name.startsWith('expr')` references
+// the declaration `google.api.name` within a
+// [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression, and the
+// function declaration `startsWith`.
+type Expr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. An id assigned to this node by the parser which is unique in a
+ // given expression tree. This is used to associate type information and other
+ // attributes to a node in the parse tree.
+ Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
+ // Required. Variants of expressions.
+ //
+ // Types that are assignable to ExprKind:
+ //
+ // *Expr_ConstExpr
+ // *Expr_IdentExpr
+ // *Expr_SelectExpr
+ // *Expr_CallExpr
+ // *Expr_ListExpr
+ // *Expr_StructExpr
+ // *Expr_ComprehensionExpr
+ ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"`
+}
+
+func (x *Expr) Reset() {
+ *x = Expr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr) ProtoMessage() {}
+
+func (x *Expr) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr.ProtoReflect.Descriptor instead.
+func (*Expr) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Expr) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (m *Expr) GetExprKind() isExpr_ExprKind {
+ if m != nil {
+ return m.ExprKind
+ }
+ return nil
+}
+
+func (x *Expr) GetConstExpr() *Constant {
+ if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok {
+ return x.ConstExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetIdentExpr() *Expr_Ident {
+ if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok {
+ return x.IdentExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetSelectExpr() *Expr_Select {
+ if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok {
+ return x.SelectExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetCallExpr() *Expr_Call {
+ if x, ok := x.GetExprKind().(*Expr_CallExpr); ok {
+ return x.CallExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetListExpr() *Expr_CreateList {
+ if x, ok := x.GetExprKind().(*Expr_ListExpr); ok {
+ return x.ListExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetStructExpr() *Expr_CreateStruct {
+ if x, ok := x.GetExprKind().(*Expr_StructExpr); ok {
+ return x.StructExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetComprehensionExpr() *Expr_Comprehension {
+ if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok {
+ return x.ComprehensionExpr
+ }
+ return nil
+}
+
+type isExpr_ExprKind interface {
+ isExpr_ExprKind()
+}
+
+type Expr_ConstExpr struct {
+ // A literal expression.
+ ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"`
+}
+
+type Expr_IdentExpr struct {
+ // An identifier expression.
+ IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"`
+}
+
+type Expr_SelectExpr struct {
+ // A field selection expression, e.g. `request.auth`.
+ SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"`
+}
+
+type Expr_CallExpr struct {
+ // A call expression, including calls to predefined functions and operators.
+ CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"`
+}
+
+type Expr_ListExpr struct {
+ // A list creation expression.
+ ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"`
+}
+
+type Expr_StructExpr struct {
+ // A map or message creation expression.
+ StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"`
+}
+
+type Expr_ComprehensionExpr struct {
+ // A comprehension expression.
+ ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"`
+}
+
+func (*Expr_ConstExpr) isExpr_ExprKind() {}
+
+func (*Expr_IdentExpr) isExpr_ExprKind() {}
+
+func (*Expr_SelectExpr) isExpr_ExprKind() {}
+
+func (*Expr_CallExpr) isExpr_ExprKind() {}
+
+func (*Expr_ListExpr) isExpr_ExprKind() {}
+
+func (*Expr_StructExpr) isExpr_ExprKind() {}
+
+func (*Expr_ComprehensionExpr) isExpr_ExprKind() {}
+
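As the `Expr` comment above notes, everything except the `.` operator is modelled as a call, and qualified names become nested `Select` nodes. Below is a small, hypothetical sketch (not part of the vendored file; the ids are arbitrary) of the AST for `request.auth` built from these types.

package main

import (
	"fmt"

	expr "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

func main() {
	// `request.auth`: a Select whose operand is the bare identifier `request`.
	ast := &expr.Expr{
		Id: 2,
		ExprKind: &expr.Expr_SelectExpr{
			SelectExpr: &expr.Expr_Select{
				Operand: &expr.Expr{
					Id:       1,
					ExprKind: &expr.Expr_IdentExpr{IdentExpr: &expr.Expr_Ident{Name: "request"}},
				},
				Field: "auth",
			},
		},
	}
	fmt.Println(ast.GetSelectExpr().GetField()) // prints: auth
}
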
+// Represents a primitive literal.
+//
+// Named 'Constant' here for backwards compatibility.
+//
+// This is similar to the primitives supported in the well-known type
+// `google.protobuf.Value`, but richer so it can represent CEL's full range of
+// primitives.
+//
+// Lists and structs are not included as constants as these aggregate types may
+// contain [Expr][google.api.expr.v1alpha1.Expr] elements which require
+// evaluation and are thus not constant.
+//
+// Examples of literals include: `"hello"`, `b'bytes'`, `1u`, `4.2`, `-2`,
+// `true`, `null`.
+type Constant struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The valid constant kinds.
+ //
+ // Types that are assignable to ConstantKind:
+ //
+ // *Constant_NullValue
+ // *Constant_BoolValue
+ // *Constant_Int64Value
+ // *Constant_Uint64Value
+ // *Constant_DoubleValue
+ // *Constant_StringValue
+ // *Constant_BytesValue
+ // *Constant_DurationValue
+ // *Constant_TimestampValue
+ ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"`
+}
+
+func (x *Constant) Reset() {
+ *x = Constant{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Constant) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Constant) ProtoMessage() {}
+
+func (x *Constant) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Constant.ProtoReflect.Descriptor instead.
+func (*Constant) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{2}
+}
+
+func (m *Constant) GetConstantKind() isConstant_ConstantKind {
+ if m != nil {
+ return m.ConstantKind
+ }
+ return nil
+}
+
+func (x *Constant) GetNullValue() structpb.NullValue {
+ if x, ok := x.GetConstantKind().(*Constant_NullValue); ok {
+ return x.NullValue
+ }
+ return structpb.NullValue_NULL_VALUE
+}
+
+func (x *Constant) GetBoolValue() bool {
+ if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Constant) GetInt64Value() int64 {
+ if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Constant) GetUint64Value() uint64 {
+ if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *Constant) GetDoubleValue() float64 {
+ if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Constant) GetStringValue() string {
+ if x, ok := x.GetConstantKind().(*Constant_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Constant) GetBytesValue() []byte {
+ if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetDurationValue() *durationpb.Duration {
+ if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok {
+ return x.DurationValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetTimestampValue() *timestamppb.Timestamp {
+ if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok {
+ return x.TimestampValue
+ }
+ return nil
+}
+
+type isConstant_ConstantKind interface {
+ isConstant_ConstantKind()
+}
+
+type Constant_NullValue struct {
+ // null value.
+ NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Constant_BoolValue struct {
+ // boolean value.
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Constant_Int64Value struct {
+ // int64 value.
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Constant_Uint64Value struct {
+ // uint64 value.
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Constant_DoubleValue struct {
+ // double value.
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Constant_StringValue struct {
+ // string value.
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Constant_BytesValue struct {
+ // bytes value.
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Constant_DurationValue struct {
+ // protobuf.Duration value.
+ //
+ // Deprecated: duration is no longer considered a builtin cel type.
+ //
+ // Deprecated: Do not use.
+ DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"`
+}
+
+type Constant_TimestampValue struct {
+ // protobuf.Timestamp value.
+ //
+ // Deprecated: timestamp is no longer considered a builtin cel type.
+ //
+ // Deprecated: Do not use.
+ TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"`
+}
+
+func (*Constant_NullValue) isConstant_ConstantKind() {}
+
+func (*Constant_BoolValue) isConstant_ConstantKind() {}
+
+func (*Constant_Int64Value) isConstant_ConstantKind() {}
+
+func (*Constant_Uint64Value) isConstant_ConstantKind() {}
+
+func (*Constant_DoubleValue) isConstant_ConstantKind() {}
+
+func (*Constant_StringValue) isConstant_ConstantKind() {}
+
+func (*Constant_BytesValue) isConstant_ConstantKind() {}
+
+func (*Constant_DurationValue) isConstant_ConstantKind() {}
+
+func (*Constant_TimestampValue) isConstant_ConstantKind() {}
+
+// Source information collected at parse time.
+type SourceInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The syntax version of the source, e.g. `cel1`.
+ SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"`
+ // The location name. All position information attached to an expression is
+ // relative to this location.
+ //
+ // The location could be a file, UI element, or similar. For example,
+ // `acme/app/AnvilPolicy.cel`.
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ // Monotonically increasing list of code point offsets where newlines
+ // `\n` appear.
+ //
+ // The line number of a given position is the index `i` where for a given
+ // `id` the `line_offsets[i] < id_positions[id] < line_offsets[i+1]`. The
+	// column may be derived from `id_positions[id] - line_offsets[i]`.
+ LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"`
+ // A map from the parse node id (e.g. `Expr.id`) to the code point offset
+ // within the source.
+ Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+ // A map from the parse node id where a macro replacement was made to the
+ // call `Expr` that resulted in a macro expansion.
+ //
+ // For example, `has(value.field)` is a function call that is replaced by a
+ // `test_only` field selection in the AST. Likewise, the call
+ // `list.exists(e, e > 10)` translates to a comprehension expression. The key
+ // in the map corresponds to the expression id of the expanded macro, and the
+ // value is the call `Expr` that was replaced.
+ MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // A list of tags for extensions that were used while parsing or type checking
+ // the source expression. For example, optimizations that require special
+ // runtime support may be specified.
+ //
+ // These are used to check feature support between components in separate
+ // implementations. This can be used to either skip redundant work or
+ // report an error if the extension is unsupported.
+ Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"`
+}
+
+func (x *SourceInfo) Reset() {
+ *x = SourceInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo) ProtoMessage() {}
+
+func (x *SourceInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead.
+func (*SourceInfo) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *SourceInfo) GetSyntaxVersion() string {
+ if x != nil {
+ return x.SyntaxVersion
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLineOffsets() []int32 {
+ if x != nil {
+ return x.LineOffsets
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetPositions() map[int64]int32 {
+ if x != nil {
+ return x.Positions
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetMacroCalls() map[int64]*Expr {
+ if x != nil {
+ return x.MacroCalls
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
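The `line_offsets`/`positions` comment above implies a simple lookup: find the last newline offset before a node's position, count how many newlines precede it, and subtract. A hedged sketch of that derivation follows (illustrative only, not part of the vendored file; the helper name and package are hypothetical).

package exprutil

import (
	expr "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

// lineColumn derives a 1-based line and a column for the node with the given
// id, following the line_offsets formula documented on SourceInfo.
func lineColumn(info *expr.SourceInfo, id int64) (line int, column int32) {
	offset := info.GetPositions()[id]
	line, column = 1, offset
	for i, nl := range info.GetLineOffsets() {
		if nl >= offset {
			break
		}
		line = i + 2         // the position lies after the i-th newline
		column = offset - nl // id_positions[id] - line_offsets[i], per the comment
	}
	return line, column
}
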
+// A specific position in source.
+type SourcePosition struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+	// The source location name (e.g. file name).
+ Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"`
+ // The UTF-8 code unit offset.
+ Offset int32 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
+ // The 1-based index of the starting line in the source text
+ // where the issue occurs, or 0 if unknown.
+ Line int32 `protobuf:"varint,3,opt,name=line,proto3" json:"line,omitempty"`
+ // The 0-based index of the starting position within the line of source text
+ // where the issue occurs. Only meaningful if line is nonzero.
+ Column int32 `protobuf:"varint,4,opt,name=column,proto3" json:"column,omitempty"`
+}
+
+func (x *SourcePosition) Reset() {
+ *x = SourcePosition{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourcePosition) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourcePosition) ProtoMessage() {}
+
+func (x *SourcePosition) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourcePosition.ProtoReflect.Descriptor instead.
+func (*SourcePosition) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *SourcePosition) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *SourcePosition) GetOffset() int32 {
+ if x != nil {
+ return x.Offset
+ }
+ return 0
+}
+
+func (x *SourcePosition) GetLine() int32 {
+ if x != nil {
+ return x.Line
+ }
+ return 0
+}
+
+func (x *SourcePosition) GetColumn() int32 {
+ if x != nil {
+ return x.Column
+ }
+ return 0
+}
+
+// An identifier expression. e.g. `request`.
+type Expr_Ident struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Holds a single, unqualified identifier, possibly preceded by a
+ // '.'.
+ //
+ // Qualified names are represented by the
+ // [Expr.Select][google.api.expr.v1alpha1.Expr.Select] expression.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *Expr_Ident) Reset() {
+ *x = Expr_Ident{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Ident) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Ident) ProtoMessage() {}
+
+func (x *Expr_Ident) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead.
+func (*Expr_Ident) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Expr_Ident) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// A field selection expression. e.g. `request.auth`.
+type Expr_Select struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The target of the selection expression.
+ //
+ // For example, in the select expression `request.auth`, the `request`
+ // portion of the expression is the `operand`.
+ Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"`
+ // Required. The name of the field to select.
+ //
+ // For example, in the select expression `request.auth`, the `auth` portion
+ // of the expression would be the `field`.
+ Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"`
+ // Whether the select is to be interpreted as a field presence test.
+ //
+ // This results from the macro `has(request.auth)`.
+ TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"`
+}
+
+func (x *Expr_Select) Reset() {
+ *x = Expr_Select{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Select) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Select) ProtoMessage() {}
+
+func (x *Expr_Select) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead.
+func (*Expr_Select) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Expr_Select) GetOperand() *Expr {
+ if x != nil {
+ return x.Operand
+ }
+ return nil
+}
+
+func (x *Expr_Select) GetField() string {
+ if x != nil {
+ return x.Field
+ }
+ return ""
+}
+
+func (x *Expr_Select) GetTestOnly() bool {
+ if x != nil {
+ return x.TestOnly
+ }
+ return false
+}
+
+// A call expression, including calls to predefined functions and operators.
+//
+// For example, `value == 10`, `size(map_value)`.
+type Expr_Call struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The target of a method call-style expression. For example, `x` in
+ // `x.f()`.
+ Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
+ // Required. The name of the function or method being called.
+ Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"`
+ // The arguments.
+ Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
+}
+
+func (x *Expr_Call) Reset() {
+ *x = Expr_Call{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Call) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Call) ProtoMessage() {}
+
+func (x *Expr_Call) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead.
+func (*Expr_Call) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Expr_Call) GetTarget() *Expr {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+
+func (x *Expr_Call) GetFunction() string {
+ if x != nil {
+ return x.Function
+ }
+ return ""
+}
+
+func (x *Expr_Call) GetArgs() []*Expr {
+ if x != nil {
+ return x.Args
+ }
+ return nil
+}
+
+// A list creation expression.
+//
+// Lists may either be homogeneous, e.g. `[1, 2, 3]`, or heterogeneous, e.g.
+// `dyn([1, 'hello', 2.0])`.
+type Expr_CreateList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The elements part of the list.
+ Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"`
+ // The indices within the elements list which are marked as optional
+ // elements.
+ //
+ // When an optional-typed value is present, the value it contains
+ // is included in the list. If the optional-typed value is absent, the list
+ // element is omitted from the CreateList result.
+ OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"`
+}
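+
+// exampleOptionalListLiteral is an illustrative sketch, not part of the
+// generated code: a CreateList that a parser supporting optional list
+// elements (for example, cel-go's optional-types extension) might build for
+// `[?a, 2]`. Element 0 is marked optional, so it is dropped from the
+// resulting list when the optional value bound to `a` is absent. The node
+// ids and the `?` surface syntax are assumptions for illustration only.
+var exampleOptionalListLiteral = &Expr_CreateList{
+ Elements: []*Expr{
+  {Id: 1, ExprKind: &Expr_IdentExpr{IdentExpr: &Expr_Ident{Name: "a"}}},
+  {Id: 2, ExprKind: &Expr_ConstExpr{ConstExpr: &Constant{ConstantKind: &Constant_Int64Value{Int64Value: 2}}}},
+ },
+ OptionalIndices: []int32{0},
+}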
+
+func (x *Expr_CreateList) Reset() {
+ *x = Expr_CreateList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateList) ProtoMessage() {}
+
+func (x *Expr_CreateList) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead.
+func (*Expr_CreateList) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Expr_CreateList) GetElements() []*Expr {
+ if x != nil {
+ return x.Elements
+ }
+ return nil
+}
+
+func (x *Expr_CreateList) GetOptionalIndices() []int32 {
+ if x != nil {
+ return x.OptionalIndices
+ }
+ return nil
+}
+
+// A map or message creation expression.
+//
+// Maps are constructed as `{'key_name': 'value'}`. Message construction is
+// similar, but prefixed with a type name and composed of field ids:
+// `types.MyType{field_id: 'value'}`.
+type Expr_CreateStruct struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The type name of the message to be created, empty when creating map
+ // literals.
+ MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"`
+ // The entries in the creation expression.
+ Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *Expr_CreateStruct) Reset() {
+ *x = Expr_CreateStruct{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateStruct) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct) ProtoMessage() {}
+
+func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 4}
+}
+
+func (x *Expr_CreateStruct) GetMessageName() string {
+ if x != nil {
+ return x.MessageName
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+// A comprehension expression applied to a list or map.
+//
+// Comprehensions are not part of the core syntax, but enabled with macros.
+// A macro matches a specific call signature within a parsed AST and replaces
+// the call with an alternate AST block. Macro expansion happens at parse
+// time.
+//
+// The following macros are supported within CEL:
+//
+// Aggregate type macros may be applied to all elements in a list or all keys
+// in a map:
+//
+// - `all`, `exists`, `exists_one` - test a predicate expression against
+// the inputs and return `true` if the predicate is satisfied for all,
+// any, or only one value: `list.all(x, x < 10)`.
+// - `filter` - test a predicate expression against the inputs and return
+// the subset of elements which satisfy the predicate:
+// `payments.filter(p, p > 1000)`.
+// - `map` - apply an expression to all elements in the input and return the
+// output aggregate type: `[1, 2, 3].map(i, i * i)`.
+//
+// The `has(m.x)` macro tests whether the property `x` is present in struct
+// `m`. The semantics of this macro depend on the type of `m`. For proto2
+// messages `has(m.x)` is defined as 'defined, but not set'. For proto3, the
+// macro tests whether the property is set to its default. For map and struct
+// types, the macro tests whether the property `x` is defined on `m`.
+//
+// Evaluation of comprehensions for the standard environment macros can best
+// be visualized with the following pseudocode:
+//
+// ```
+// let `accu_var` = `accu_init`
+//
+// for (let `iter_var` in `iter_range`) {
+// if (!`loop_condition`) {
+// break
+// }
+// `accu_var` = `loop_step`
+// }
+//
+// return `result`
+// ```
+//
+// Comprehensions for the optional V2 macros, which support map-to-map
+// translation, differ slightly from the standard environment macros in that
+// they expose the key or index in addition to the value for each list
+// or map entry:
+//
+// ```
+// let `accu_var` = `accu_init`
+//
+// for (let `iter_var`, `iter_var2` in `iter_range`) {
+// if (!`loop_condition`) {
+// break
+// }
+// `accu_var` = `loop_step`
+// }
+//
+// return `result`
+// ```
+type Expr_Comprehension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the first iteration variable.
+ // When the iter_range is a list, this variable is the list element.
+ // When the iter_range is a map, this variable is the map entry key.
+ IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"`
+ // The name of the second iteration variable, empty if not set.
+ // When the iter_range is a list, this variable is the integer index.
+ // When the iter_range is a map, this variable is the map entry value.
+ // This field is only set for comprehension v2 macros.
+ IterVar2 string `protobuf:"bytes,8,opt,name=iter_var2,json=iterVar2,proto3" json:"iter_var2,omitempty"`
+ // The range over which the comprehension iterates.
+ IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"`
+ // The name of the variable used for accumulation of the result.
+ AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"`
+ // The initial value of the accumulator.
+ AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"`
+ // An expression which can contain iter_var, iter_var2, and accu_var.
+ //
+ // Returns false when the result has been computed and may be used as
+ // a hint to short-circuit the remainder of the comprehension.
+ LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"`
+ // An expression which can contain iter_var, iter_var2, and accu_var.
+ //
+ // Computes the next value of accu_var.
+ LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"`
+ // An expression which can contain accu_var.
+ //
+ // Computes the result.
+ Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"`
+}
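+
+// exampleAllMacroExpansion is an illustrative sketch, not part of the
+// generated code: roughly how a parser might expand the macro call
+// `[1, 2].all(x, x < 10)` into a Comprehension, following the pseudocode
+// above. The accumulator and operator names (`__result__`, `_&&_`, `_<_`,
+// `@not_strictly_false`) and the node ids follow cel-go conventions and are
+// assumptions here; other implementations may differ.
+var exampleAllMacroExpansion = &Expr_Comprehension{
+ IterVar: "x",
+ // iter_range: the list literal `[1, 2]`.
+ IterRange: &Expr{Id: 1, ExprKind: &Expr_ListExpr{ListExpr: &Expr_CreateList{Elements: []*Expr{
+  {Id: 2, ExprKind: &Expr_ConstExpr{ConstExpr: &Constant{ConstantKind: &Constant_Int64Value{Int64Value: 1}}}},
+  {Id: 3, ExprKind: &Expr_ConstExpr{ConstExpr: &Constant{ConstantKind: &Constant_Int64Value{Int64Value: 2}}}},
+ }}}},
+ AccuVar: "__result__",
+ // accu_init: `true`; `all` assumes the predicate holds until proven otherwise.
+ AccuInit: &Expr{Id: 4, ExprKind: &Expr_ConstExpr{ConstExpr: &Constant{ConstantKind: &Constant_BoolValue{BoolValue: true}}}},
+ // loop_condition: `@not_strictly_false(__result__)`, allowing iteration to
+ // stop early once the accumulator is definitively false.
+ LoopCondition: &Expr{Id: 5, ExprKind: &Expr_CallExpr{CallExpr: &Expr_Call{
+  Function: "@not_strictly_false",
+  Args:     []*Expr{{Id: 6, ExprKind: &Expr_IdentExpr{IdentExpr: &Expr_Ident{Name: "__result__"}}}},
+ }}},
+ // loop_step: `__result__ && (x < 10)`.
+ LoopStep: &Expr{Id: 7, ExprKind: &Expr_CallExpr{CallExpr: &Expr_Call{
+  Function: "_&&_",
+  Args: []*Expr{
+   {Id: 8, ExprKind: &Expr_IdentExpr{IdentExpr: &Expr_Ident{Name: "__result__"}}},
+   {Id: 9, ExprKind: &Expr_CallExpr{CallExpr: &Expr_Call{
+    Function: "_<_",
+    Args: []*Expr{
+     {Id: 10, ExprKind: &Expr_IdentExpr{IdentExpr: &Expr_Ident{Name: "x"}}},
+     {Id: 11, ExprKind: &Expr_ConstExpr{ConstExpr: &Constant{ConstantKind: &Constant_Int64Value{Int64Value: 10}}}},
+    },
+   }}},
+  },
+ }}},
+ // result: the final value of the accumulator.
+ Result: &Expr{Id: 12, ExprKind: &Expr_IdentExpr{IdentExpr: &Expr_Ident{Name: "__result__"}}},
+}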
+
+func (x *Expr_Comprehension) Reset() {
+ *x = Expr_Comprehension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Comprehension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Comprehension) ProtoMessage() {}
+
+func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead.
+func (*Expr_Comprehension) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 5}
+}
+
+func (x *Expr_Comprehension) GetIterVar() string {
+ if x != nil {
+ return x.IterVar
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetIterVar2() string {
+ if x != nil {
+ return x.IterVar2
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetIterRange() *Expr {
+ if x != nil {
+ return x.IterRange
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetAccuVar() string {
+ if x != nil {
+ return x.AccuVar
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetAccuInit() *Expr {
+ if x != nil {
+ return x.AccuInit
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetLoopCondition() *Expr {
+ if x != nil {
+ return x.LoopCondition
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetLoopStep() *Expr {
+ if x != nil {
+ return x.LoopStep
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetResult() *Expr {
+ if x != nil {
+ return x.Result
+ }
+ return nil
+}
+
+// Represents an entry.
+type Expr_CreateStruct_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. An id assigned to this node by the parser which is unique
+ // in a given expression tree. This is used to associate type
+ // information and other attributes to the node.
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ // The `Entry` key kinds.
+ //
+ // Types that are assignable to KeyKind:
+ //
+ // *Expr_CreateStruct_Entry_FieldKey
+ // *Expr_CreateStruct_Entry_MapKey
+ KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"`
+ // Required. The value assigned to the key.
+ //
+ // If the optional_entry field is true, the expression must resolve to an
+ // optional-typed value. If the optional value is present, the key will be
+ // set; however, if the optional value is absent, the key will be unset.
+ Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+ // Whether the key-value pair is optional.
+ OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"`
+}
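+
+// exampleOptionalMapEntry is an illustrative sketch, not part of the
+// generated code: the Entry a parser supporting optional map entries (for
+// example, cel-go's optional-types extension) might build for the literal
+// `{?'k': v}`. The key `'k'` is only set when the optional value bound to
+// `v` is present. The node ids and the `?` surface syntax are assumptions
+// for illustration only.
+var exampleOptionalMapEntry = &Expr_CreateStruct_Entry{
+ Id: 1,
+ KeyKind: &Expr_CreateStruct_Entry_MapKey{MapKey: &Expr{
+  Id:       2,
+  ExprKind: &Expr_ConstExpr{ConstExpr: &Constant{ConstantKind: &Constant_StringValue{StringValue: "k"}}},
+ }},
+ // The value must resolve to an optional-typed value; here `v` is assumed
+ // to be a variable bound to one.
+ Value:         &Expr{Id: 3, ExprKind: &Expr_IdentExpr{IdentExpr: &Expr_Ident{Name: "v"}}},
+ OptionalEntry: true,
+}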
+
+func (x *Expr_CreateStruct_Entry) Reset() {
+ *x = Expr_CreateStruct_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateStruct_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct_Entry) ProtoMessage() {}
+
+func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{1, 4, 0}
+}
+
+func (x *Expr_CreateStruct_Entry) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind {
+ if m != nil {
+ return m.KeyKind
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetFieldKey() string {
+ if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok {
+ return x.FieldKey
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr {
+ if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok {
+ return x.MapKey
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetValue() *Expr {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetOptionalEntry() bool {
+ if x != nil {
+ return x.OptionalEntry
+ }
+ return false
+}
+
+type isExpr_CreateStruct_Entry_KeyKind interface {
+ isExpr_CreateStruct_Entry_KeyKind()
+}
+
+type Expr_CreateStruct_Entry_FieldKey struct {
+ // The field key for a message creator statement.
+ FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"`
+}
+
+type Expr_CreateStruct_Entry_MapKey struct {
+ // The key expression for a map creation statement.
+ MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"`
+}
+
+func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+// An extension that was requested for the source expression.
+type SourceInfo_Extension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifier for the extension. Example: constant_folding
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ // If set, the listed components must understand the extension for the
+ // expression to evaluate correctly.
+ //
+ // This field has set semantics; repeated values should be deduplicated.
+ AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=google.api.expr.v1alpha1.SourceInfo_Extension_Component" json:"affected_components,omitempty"`
+ // Version info. May be skipped if it isn't meaningful for the extension
+ // (for example, constant_folding might always be v0.0).
+ Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+func (x *SourceInfo_Extension) Reset() {
+ *x = SourceInfo_Extension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo_Extension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension) ProtoMessage() {}
+
+func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *SourceInfo_Extension) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *SourceInfo_Extension) GetAffectedComponents() []SourceInfo_Extension_Component {
+ if x != nil {
+ return x.AffectedComponents
+ }
+ return nil
+}
+
+func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version {
+ if x != nil {
+ return x.Version
+ }
+ return nil
+}
+
+// Version
+type SourceInfo_Extension_Version struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Major version changes indicate a different required support level from
+ // the required components.
+ Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
+ // Minor version changes must not change the observed behavior from
+ // existing implementations, but may be provided informationally.
+ Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
+}
+
+func (x *SourceInfo_Extension_Version) Reset() {
+ *x = SourceInfo_Extension_Version{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo_Extension_Version) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension_Version) ProtoMessage() {}
+
+func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_syntax_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Version.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension_Version) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP(), []int{3, 0, 0}
+}
+
+func (x *SourceInfo_Extension_Version) GetMajor() int64 {
+ if x != nil {
+ return x.Major
+ }
+ return 0
+}
+
+func (x *SourceInfo_Extension_Version) GetMinor() int64 {
+ if x != nil {
+ return x.Minor
+ }
+ return 0
+}
+
+var File_google_api_expr_v1alpha1_syntax_proto protoreflect.FileDescriptor
+
+var file_google_api_expr_v1alpha1_syntax_proto_rawDesc = []byte{
+ 0x0a, 0x25, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70,
+ 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61,
+ 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
+ 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x87, 0x01, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12,
+ 0x32, 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65,
+ 0x78, 0x70, 0x72, 0x12, 0x45, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e,
+ 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
+ 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xcb, 0x0d, 0x0a, 0x04, 0x45,
+ 0x78, 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x02, 0x69, 0x64, 0x12, 0x43, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70,
+ 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63,
+ 0x6f, 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x45, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e,
+ 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76,
+ 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65,
+ 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12,
+ 0x48, 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
+ 0x45, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73,
+ 0x65, 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x42, 0x0a, 0x09, 0x63, 0x61, 0x6c,
+ 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76,
+ 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c,
+ 0x6c, 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x48, 0x0a,
+ 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72,
+ 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c,
+ 0x69, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63,
+ 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76,
+ 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72,
+ 0x75, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x5d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72,
+ 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x1a, 0x75, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x38, 0x0a,
+ 0x07, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07,
+ 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a,
+ 0x09, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x8e, 0x01, 0x0a, 0x04, 0x43,
+ 0x61, 0x6c, 0x6c, 0x12, 0x36, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
+ 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x73, 0x0a, 0x0a, 0x43,
+ 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x6c, 0x65,
+ 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31,
+ 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65,
+ 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61,
+ 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52,
+ 0x0f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73,
+ 0x1a, 0xdb, 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63,
+ 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+ 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
+ 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75,
+ 0x63, 0x74, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65,
+ 0x73, 0x1a, 0xda, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66,
+ 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00,
+ 0x52, 0x08, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x07, 0x6d, 0x61,
+ 0x70, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31,
+ 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d,
+ 0x61, 0x70, 0x4b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
+ 0x45, 0x78, 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0x9a,
+ 0x03, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x12, 0x19, 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x69,
+ 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x32, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x32, 0x12, 0x3d, 0x0a, 0x0a, 0x69, 0x74, 0x65, 0x72,
+ 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76,
+ 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69, 0x74,
+ 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75, 0x5f,
+ 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75, 0x56,
+ 0x61, 0x72, 0x12, 0x3b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
+ 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74, 0x12,
+ 0x45, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f, 0x6e,
+ 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x73,
+ 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c,
+ 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70, 0x53,
+ 0x74, 0x65, 0x70, 0x12, 0x36, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x65,
+ 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36,
+ 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34,
+ 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b,
+ 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64,
+ 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79,
+ 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01, 0x48,
+ 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, 0x63,
+ 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x8c, 0x07, 0x0a,
+ 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e, 0x73,
+ 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21,
+ 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74,
+ 0x73, 0x12, 0x51, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63, 0x61,
+ 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c,
+ 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e,
+ 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0a, 0x65,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x80, 0x03, 0x0a, 0x09,
+ 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x69, 0x0a, 0x13, 0x61, 0x66, 0x66,
+ 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
+ 0x31, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74,
+ 0x52, 0x12, 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e,
+ 0x65, 0x6e, 0x74, 0x73, 0x12, 0x50, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
+ 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a,
+ 0x09, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f,
+ 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
+ 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45,
+ 0x4e, 0x54, 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43,
+ 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48,
+ 0x45, 0x43, 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f,
+ 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x1a, 0x3c,
+ 0x0a, 0x0e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5d, 0x0a, 0x0f,
+ 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x72,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x70, 0x0a, 0x0e, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a,
+ 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66,
+ 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65,
+ 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x42, 0x6e, 0x0a,
+ 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0b, 0x53,
+ 0x79, 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
+ 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61,
+ 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_api_expr_v1alpha1_syntax_proto_rawDescOnce sync.Once
+ file_google_api_expr_v1alpha1_syntax_proto_rawDescData = file_google_api_expr_v1alpha1_syntax_proto_rawDesc
+)
+
+func file_google_api_expr_v1alpha1_syntax_proto_rawDescGZIP() []byte {
+ file_google_api_expr_v1alpha1_syntax_proto_rawDescOnce.Do(func() {
+ file_google_api_expr_v1alpha1_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_syntax_proto_rawDescData)
+ })
+ return file_google_api_expr_v1alpha1_syntax_proto_rawDescData
+}
+
+var file_google_api_expr_v1alpha1_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_api_expr_v1alpha1_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_google_api_expr_v1alpha1_syntax_proto_goTypes = []interface{}{
+ (SourceInfo_Extension_Component)(0), // 0: google.api.expr.v1alpha1.SourceInfo.Extension.Component
+ (*ParsedExpr)(nil), // 1: google.api.expr.v1alpha1.ParsedExpr
+ (*Expr)(nil), // 2: google.api.expr.v1alpha1.Expr
+ (*Constant)(nil), // 3: google.api.expr.v1alpha1.Constant
+ (*SourceInfo)(nil), // 4: google.api.expr.v1alpha1.SourceInfo
+ (*SourcePosition)(nil), // 5: google.api.expr.v1alpha1.SourcePosition
+ (*Expr_Ident)(nil), // 6: google.api.expr.v1alpha1.Expr.Ident
+ (*Expr_Select)(nil), // 7: google.api.expr.v1alpha1.Expr.Select
+ (*Expr_Call)(nil), // 8: google.api.expr.v1alpha1.Expr.Call
+ (*Expr_CreateList)(nil), // 9: google.api.expr.v1alpha1.Expr.CreateList
+ (*Expr_CreateStruct)(nil), // 10: google.api.expr.v1alpha1.Expr.CreateStruct
+ (*Expr_Comprehension)(nil), // 11: google.api.expr.v1alpha1.Expr.Comprehension
+ (*Expr_CreateStruct_Entry)(nil), // 12: google.api.expr.v1alpha1.Expr.CreateStruct.Entry
+ (*SourceInfo_Extension)(nil), // 13: google.api.expr.v1alpha1.SourceInfo.Extension
+ nil, // 14: google.api.expr.v1alpha1.SourceInfo.PositionsEntry
+ nil, // 15: google.api.expr.v1alpha1.SourceInfo.MacroCallsEntry
+ (*SourceInfo_Extension_Version)(nil), // 16: google.api.expr.v1alpha1.SourceInfo.Extension.Version
+ (structpb.NullValue)(0), // 17: google.protobuf.NullValue
+ (*durationpb.Duration)(nil), // 18: google.protobuf.Duration
+ (*timestamppb.Timestamp)(nil), // 19: google.protobuf.Timestamp
+}
+var file_google_api_expr_v1alpha1_syntax_proto_depIdxs = []int32{
+ 2, // 0: google.api.expr.v1alpha1.ParsedExpr.expr:type_name -> google.api.expr.v1alpha1.Expr
+ 4, // 1: google.api.expr.v1alpha1.ParsedExpr.source_info:type_name -> google.api.expr.v1alpha1.SourceInfo
+ 3, // 2: google.api.expr.v1alpha1.Expr.const_expr:type_name -> google.api.expr.v1alpha1.Constant
+ 6, // 3: google.api.expr.v1alpha1.Expr.ident_expr:type_name -> google.api.expr.v1alpha1.Expr.Ident
+ 7, // 4: google.api.expr.v1alpha1.Expr.select_expr:type_name -> google.api.expr.v1alpha1.Expr.Select
+ 8, // 5: google.api.expr.v1alpha1.Expr.call_expr:type_name -> google.api.expr.v1alpha1.Expr.Call
+ 9, // 6: google.api.expr.v1alpha1.Expr.list_expr:type_name -> google.api.expr.v1alpha1.Expr.CreateList
+ 10, // 7: google.api.expr.v1alpha1.Expr.struct_expr:type_name -> google.api.expr.v1alpha1.Expr.CreateStruct
+ 11, // 8: google.api.expr.v1alpha1.Expr.comprehension_expr:type_name -> google.api.expr.v1alpha1.Expr.Comprehension
+ 17, // 9: google.api.expr.v1alpha1.Constant.null_value:type_name -> google.protobuf.NullValue
+ 18, // 10: google.api.expr.v1alpha1.Constant.duration_value:type_name -> google.protobuf.Duration
+ 19, // 11: google.api.expr.v1alpha1.Constant.timestamp_value:type_name -> google.protobuf.Timestamp
+ 14, // 12: google.api.expr.v1alpha1.SourceInfo.positions:type_name -> google.api.expr.v1alpha1.SourceInfo.PositionsEntry
+ 15, // 13: google.api.expr.v1alpha1.SourceInfo.macro_calls:type_name -> google.api.expr.v1alpha1.SourceInfo.MacroCallsEntry
+ 13, // 14: google.api.expr.v1alpha1.SourceInfo.extensions:type_name -> google.api.expr.v1alpha1.SourceInfo.Extension
+ 2, // 15: google.api.expr.v1alpha1.Expr.Select.operand:type_name -> google.api.expr.v1alpha1.Expr
+ 2, // 16: google.api.expr.v1alpha1.Expr.Call.target:type_name -> google.api.expr.v1alpha1.Expr
+ 2, // 17: google.api.expr.v1alpha1.Expr.Call.args:type_name -> google.api.expr.v1alpha1.Expr
+ 2, // 18: google.api.expr.v1alpha1.Expr.CreateList.elements:type_name -> google.api.expr.v1alpha1.Expr
+ 12, // 19: google.api.expr.v1alpha1.Expr.CreateStruct.entries:type_name -> google.api.expr.v1alpha1.Expr.CreateStruct.Entry
+ 2, // 20: google.api.expr.v1alpha1.Expr.Comprehension.iter_range:type_name -> google.api.expr.v1alpha1.Expr
+ 2, // 21: google.api.expr.v1alpha1.Expr.Comprehension.accu_init:type_name -> google.api.expr.v1alpha1.Expr
+ 2, // 22: google.api.expr.v1alpha1.Expr.Comprehension.loop_condition:type_name -> google.api.expr.v1alpha1.Expr
+ 2, // 23: google.api.expr.v1alpha1.Expr.Comprehension.loop_step:type_name -> google.api.expr.v1alpha1.Expr
+ 2, // 24: google.api.expr.v1alpha1.Expr.Comprehension.result:type_name -> google.api.expr.v1alpha1.Expr
+ 2, // 25: google.api.expr.v1alpha1.Expr.CreateStruct.Entry.map_key:type_name -> google.api.expr.v1alpha1.Expr
+ 2, // 26: google.api.expr.v1alpha1.Expr.CreateStruct.Entry.value:type_name -> google.api.expr.v1alpha1.Expr
+ 0, // 27: google.api.expr.v1alpha1.SourceInfo.Extension.affected_components:type_name -> google.api.expr.v1alpha1.SourceInfo.Extension.Component
+ 16, // 28: google.api.expr.v1alpha1.SourceInfo.Extension.version:type_name -> google.api.expr.v1alpha1.SourceInfo.Extension.Version
+ 2, // 29: google.api.expr.v1alpha1.SourceInfo.MacroCallsEntry.value:type_name -> google.api.expr.v1alpha1.Expr
+ 30, // [30:30] is the sub-list for method output_type
+ 30, // [30:30] is the sub-list for method input_type
+ 30, // [30:30] is the sub-list for extension type_name
+ 30, // [30:30] is the sub-list for extension extendee
+ 0, // [0:30] is the sub-list for field type_name
+}
+
+func init() { file_google_api_expr_v1alpha1_syntax_proto_init() }
+func file_google_api_expr_v1alpha1_syntax_proto_init() {
+ if File_google_api_expr_v1alpha1_syntax_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ParsedExpr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Constant); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourcePosition); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Ident); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Select); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Call); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateStruct); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Comprehension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateStruct_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo_Extension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo_Extension_Version); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Expr_ConstExpr)(nil),
+ (*Expr_IdentExpr)(nil),
+ (*Expr_SelectExpr)(nil),
+ (*Expr_CallExpr)(nil),
+ (*Expr_ListExpr)(nil),
+ (*Expr_StructExpr)(nil),
+ (*Expr_ComprehensionExpr)(nil),
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Constant_NullValue)(nil),
+ (*Constant_BoolValue)(nil),
+ (*Constant_Int64Value)(nil),
+ (*Constant_Uint64Value)(nil),
+ (*Constant_DoubleValue)(nil),
+ (*Constant_StringValue)(nil),
+ (*Constant_BytesValue)(nil),
+ (*Constant_DurationValue)(nil),
+ (*Constant_TimestampValue)(nil),
+ }
+ file_google_api_expr_v1alpha1_syntax_proto_msgTypes[11].OneofWrappers = []interface{}{
+ (*Expr_CreateStruct_Entry_FieldKey)(nil),
+ (*Expr_CreateStruct_Entry_MapKey)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_expr_v1alpha1_syntax_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 16,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_expr_v1alpha1_syntax_proto_goTypes,
+ DependencyIndexes: file_google_api_expr_v1alpha1_syntax_proto_depIdxs,
+ EnumInfos: file_google_api_expr_v1alpha1_syntax_proto_enumTypes,
+ MessageInfos: file_google_api_expr_v1alpha1_syntax_proto_msgTypes,
+ }.Build()
+ File_google_api_expr_v1alpha1_syntax_proto = out.File
+ file_google_api_expr_v1alpha1_syntax_proto_rawDesc = nil
+ file_google_api_expr_v1alpha1_syntax_proto_goTypes = nil
+ file_google_api_expr_v1alpha1_syntax_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go
new file mode 100644
index 000000000..0a5ca6a1b
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go
@@ -0,0 +1,721 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v4.24.4
+// source: google/api/expr/v1alpha1/value.proto
+
+package expr
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Represents a CEL value.
+//
+// This is similar to `google.protobuf.Value`, but can represent CEL's full
+// range of values.
+type Value struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The valid kinds of values.
+ //
+ // Types that are assignable to Kind:
+ //
+ // *Value_NullValue
+ // *Value_BoolValue
+ // *Value_Int64Value
+ // *Value_Uint64Value
+ // *Value_DoubleValue
+ // *Value_StringValue
+ // *Value_BytesValue
+ // *Value_EnumValue
+ // *Value_ObjectValue
+ // *Value_MapValue
+ // *Value_ListValue
+ // *Value_TypeValue
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (x *Value) Reset() {
+ *x = Value{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Value) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Value) ProtoMessage() {}
+
+func (x *Value) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Value.ProtoReflect.Descriptor instead.
+func (*Value) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *Value) GetKind() isValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (x *Value) GetNullValue() structpb.NullValue {
+ if x, ok := x.GetKind().(*Value_NullValue); ok {
+ return x.NullValue
+ }
+ return structpb.NullValue_NULL_VALUE
+}
+
+func (x *Value) GetBoolValue() bool {
+ if x, ok := x.GetKind().(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Value) GetInt64Value() int64 {
+ if x, ok := x.GetKind().(*Value_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Value) GetUint64Value() uint64 {
+ if x, ok := x.GetKind().(*Value_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *Value) GetDoubleValue() float64 {
+ if x, ok := x.GetKind().(*Value_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Value) GetStringValue() string {
+ if x, ok := x.GetKind().(*Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Value) GetBytesValue() []byte {
+ if x, ok := x.GetKind().(*Value_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+func (x *Value) GetEnumValue() *EnumValue {
+ if x, ok := x.GetKind().(*Value_EnumValue); ok {
+ return x.EnumValue
+ }
+ return nil
+}
+
+func (x *Value) GetObjectValue() *anypb.Any {
+ if x, ok := x.GetKind().(*Value_ObjectValue); ok {
+ return x.ObjectValue
+ }
+ return nil
+}
+
+func (x *Value) GetMapValue() *MapValue {
+ if x, ok := x.GetKind().(*Value_MapValue); ok {
+ return x.MapValue
+ }
+ return nil
+}
+
+func (x *Value) GetListValue() *ListValue {
+ if x, ok := x.GetKind().(*Value_ListValue); ok {
+ return x.ListValue
+ }
+ return nil
+}
+
+func (x *Value) GetTypeValue() string {
+ if x, ok := x.GetKind().(*Value_TypeValue); ok {
+ return x.TypeValue
+ }
+ return ""
+}
+
+type isValue_Kind interface {
+ isValue_Kind()
+}
+
+type Value_NullValue struct {
+ // Null value.
+ NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_BoolValue struct {
+ // Boolean value.
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_Int64Value struct {
+ // Signed integer value.
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Value_Uint64Value struct {
+ // Unsigned integer value.
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Value_DoubleValue struct {
+ // Floating point value.
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+ // UTF-8 string value.
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BytesValue struct {
+ // Byte string value.
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Value_EnumValue struct {
+ // An enum value.
+ EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"`
+}
+
+type Value_ObjectValue struct {
+ // The proto message backing an object value.
+ ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"`
+}
+
+type Value_MapValue struct {
+ // Map value.
+ MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+ // List value.
+ ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+type Value_TypeValue struct {
+ // Type value.
+ TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_Int64Value) isValue_Kind() {}
+
+func (*Value_Uint64Value) isValue_Kind() {}
+
+func (*Value_DoubleValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BytesValue) isValue_Kind() {}
+
+func (*Value_EnumValue) isValue_Kind() {}
+
+func (*Value_ObjectValue) isValue_Kind() {}
+
+func (*Value_MapValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+func (*Value_TypeValue) isValue_Kind() {}
+
+// An enum value.
+type EnumValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The fully qualified name of the enum type.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // The value of the enum.
+ Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *EnumValue) Reset() {
+ *x = EnumValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EnumValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumValue) ProtoMessage() {}
+
+func (x *EnumValue) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead.
+func (*EnumValue) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *EnumValue) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *EnumValue) GetValue() int32 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+// A list.
+//
+// Wrapped in a message so 'not set' and empty can be differentiated, which is
+// required for use in a 'oneof'.
+type ListValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The ordered values in the list.
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *ListValue) Reset() {
+ *x = ListValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListValue) ProtoMessage() {}
+
+func (x *ListValue) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
+func (*ListValue) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListValue) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+// A map.
+//
+// Wrapped in a message so 'not set' and empty can be differentiated, which is
+// required for use in a 'oneof'.
+type MapValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The set of map entries.
+ //
+ // CEL has fewer restrictions on keys, so a protobuf map representation
+ // cannot be used.
+ Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *MapValue) Reset() {
+ *x = MapValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue) ProtoMessage() {}
+
+func (x *MapValue) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue.ProtoReflect.Descriptor instead.
+func (*MapValue) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *MapValue) GetEntries() []*MapValue_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+// An entry in the map.
+type MapValue_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The key.
+ //
+ // Must be unique within the map.
+ // Currently only boolean, int, uint, and string values can be keys.
+ Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // The value.
+ Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *MapValue_Entry) Reset() {
+ *x = MapValue_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapValue_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue_Entry) ProtoMessage() {}
+
+func (x *MapValue_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_expr_v1alpha1_value_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead.
+func (*MapValue_Entry) Descriptor() ([]byte, []int) {
+ return file_google_api_expr_v1alpha1_value_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *MapValue_Entry) GetKey() *Value {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *MapValue_Entry) GetValue() *Value {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+var File_google_api_expr_v1alpha1_value_proto protoreflect.FileDescriptor
+
+var file_google_api_expr_v1alpha1_value_proto_rawDesc = []byte{
+ 0x0a, 0x24, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70,
+ 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
+ 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72,
+ 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x04, 0x0a, 0x05, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69,
+ 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
+ 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48,
+ 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23,
+ 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65,
+ 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61,
+ 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48,
+ 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61,
+ 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00,
+ 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6c, 0x69,
+ 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75,
+ 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x22, 0x44, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x37, 0x0a,
+ 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0xc1, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x42, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
+ 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07,
+ 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x71, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x31, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6d, 0x0a, 0x1c, 0x63, 0x6f,
+ 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
+ 0x31, 0x3b, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_google_api_expr_v1alpha1_value_proto_rawDescOnce sync.Once
+ file_google_api_expr_v1alpha1_value_proto_rawDescData = file_google_api_expr_v1alpha1_value_proto_rawDesc
+)
+
+func file_google_api_expr_v1alpha1_value_proto_rawDescGZIP() []byte {
+ file_google_api_expr_v1alpha1_value_proto_rawDescOnce.Do(func() {
+ file_google_api_expr_v1alpha1_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_expr_v1alpha1_value_proto_rawDescData)
+ })
+ return file_google_api_expr_v1alpha1_value_proto_rawDescData
+}
+
+var file_google_api_expr_v1alpha1_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_google_api_expr_v1alpha1_value_proto_goTypes = []interface{}{
+ (*Value)(nil), // 0: google.api.expr.v1alpha1.Value
+ (*EnumValue)(nil), // 1: google.api.expr.v1alpha1.EnumValue
+ (*ListValue)(nil), // 2: google.api.expr.v1alpha1.ListValue
+ (*MapValue)(nil), // 3: google.api.expr.v1alpha1.MapValue
+ (*MapValue_Entry)(nil), // 4: google.api.expr.v1alpha1.MapValue.Entry
+ (structpb.NullValue)(0), // 5: google.protobuf.NullValue
+ (*anypb.Any)(nil), // 6: google.protobuf.Any
+}
+var file_google_api_expr_v1alpha1_value_proto_depIdxs = []int32{
+ 5, // 0: google.api.expr.v1alpha1.Value.null_value:type_name -> google.protobuf.NullValue
+ 1, // 1: google.api.expr.v1alpha1.Value.enum_value:type_name -> google.api.expr.v1alpha1.EnumValue
+ 6, // 2: google.api.expr.v1alpha1.Value.object_value:type_name -> google.protobuf.Any
+ 3, // 3: google.api.expr.v1alpha1.Value.map_value:type_name -> google.api.expr.v1alpha1.MapValue
+ 2, // 4: google.api.expr.v1alpha1.Value.list_value:type_name -> google.api.expr.v1alpha1.ListValue
+ 0, // 5: google.api.expr.v1alpha1.ListValue.values:type_name -> google.api.expr.v1alpha1.Value
+ 4, // 6: google.api.expr.v1alpha1.MapValue.entries:type_name -> google.api.expr.v1alpha1.MapValue.Entry
+ 0, // 7: google.api.expr.v1alpha1.MapValue.Entry.key:type_name -> google.api.expr.v1alpha1.Value
+ 0, // 8: google.api.expr.v1alpha1.MapValue.Entry.value:type_name -> google.api.expr.v1alpha1.Value
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_google_api_expr_v1alpha1_value_proto_init() }
+func file_google_api_expr_v1alpha1_value_proto_init() {
+ if File_google_api_expr_v1alpha1_value_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_api_expr_v1alpha1_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Value); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EnumValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_expr_v1alpha1_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapValue_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_google_api_expr_v1alpha1_value_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*Value_NullValue)(nil),
+ (*Value_BoolValue)(nil),
+ (*Value_Int64Value)(nil),
+ (*Value_Uint64Value)(nil),
+ (*Value_DoubleValue)(nil),
+ (*Value_StringValue)(nil),
+ (*Value_BytesValue)(nil),
+ (*Value_EnumValue)(nil),
+ (*Value_ObjectValue)(nil),
+ (*Value_MapValue)(nil),
+ (*Value_ListValue)(nil),
+ (*Value_TypeValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_expr_v1alpha1_value_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_expr_v1alpha1_value_proto_goTypes,
+ DependencyIndexes: file_google_api_expr_v1alpha1_value_proto_depIdxs,
+ MessageInfos: file_google_api_expr_v1alpha1_value_proto_msgTypes,
+ }.Build()
+ File_google_api_expr_v1alpha1_value_proto = out.File
+ file_google_api_expr_v1alpha1_value_proto_rawDesc = nil
+ file_google_api_expr_v1alpha1_value_proto_goTypes = nil
+ file_google_api_expr_v1alpha1_value_proto_depIdxs = nil
+}
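The newly vendored expr package models a CEL value as a `Value` message whose `kind` oneof wraps exactly one concrete kind at a time. Below is a minimal sketch of how a consumer might build and read such a value; the map contents are illustrative only, and the import path follows the go_package option shown in the generated file.

```go
package main

import (
	"fmt"

	expr "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
)

func main() {
	// A map value with one string-keyed entry, built from the generated
	// oneof wrapper types (Value_MapValue, Value_StringValue, ...).
	v := &expr.Value{
		Kind: &expr.Value_MapValue{
			MapValue: &expr.MapValue{
				Entries: []*expr.MapValue_Entry{{
					Key:   &expr.Value{Kind: &expr.Value_StringValue{StringValue: "region"}},
					Value: &expr.Value{Kind: &expr.Value_StringValue{StringValue: "us-east1"}},
				}},
			},
		},
	}

	// The generated getters return zero values when the oneof holds a
	// different kind, so callers can probe without a type switch.
	for _, e := range v.GetMapValue().GetEntries() {
		fmt.Printf("%s = %s\n", e.GetKey().GetStringValue(), e.GetValue().GetStringValue())
	}
}
```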
diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
index 3543268f8..e7d3805e3 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.21.9
+// protoc v4.24.4
// source: google/api/httpbody.proto
package httpbody
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
index a6b508188..6ad1b1c1d 100644
--- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2022 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.21.9
+// protoc v4.24.4
// source: google/rpc/status.proto
package status
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
index 608aa6e1a..d9bfa6e1e 100644
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! Please read the gR
organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
-If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
+If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
## Legal requirements
@@ -25,8 +25,8 @@ How to get your contributions merged smoothly and quickly.
is a great place to start. These issues are well-documented and usually can be
resolved with a single pull request.
-- If you are adding a new file, make sure it has the copyright message template
- at the top as a comment. You can copy over the message from an existing file
+- If you are adding a new file, make sure it has the copyright message template
+ at the top as a comment. You can copy over the message from an existing file
and update the year.
- The grpc package should only depend on standard Go packages and a small number
@@ -39,12 +39,12 @@ How to get your contributions merged smoothly and quickly.
proposal](https://github.com/grpc/proposal).
- Provide a good **PR description** as a record of **what** change is being made
- and **why** it was made. Link to a github issue if it exists.
+ and **why** it was made. Link to a GitHub issue if it exists.
-- If you want to fix formatting or style, consider whether your changes are an
- obvious improvement or might be considered a personal preference. If a style
- change is based on preference, it likely will not be accepted. If it corrects
- widely agreed-upon anti-patterns, then please do create a PR and explain the
+- If you want to fix formatting or style, consider whether your changes are an
+ obvious improvement or might be considered a personal preference. If a style
+ change is based on preference, it likely will not be accepted. If it corrects
+ widely agreed-upon anti-patterns, then please do create a PR and explain the
benefits of the change.
- Unless your PR is trivial, you should expect there will be reviewer comments
@@ -66,7 +66,7 @@ How to get your contributions merged smoothly and quickly.
- **All tests need to be passing** before your change can be merged. We
recommend you **run tests locally** before creating your PR to catch breakages
early on.
- - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors
+ - `./scripts/vet.sh` to catch vet errors
- `go test -cpu 1,4 -timeout 7m ./...` to run the tests
- `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
index c6672c0a3..5d4096d46 100644
--- a/vendor/google.golang.org/grpc/MAINTAINERS.md
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -9,20 +9,28 @@ for general contribution guidelines.
## Maintainers (in alphabetical order)
-- [cesarghali](https://github.com/cesarghali), Google LLC
+- [aranjans](https://github.com/aranjans), Google LLC
+- [arjan-bal](https://github.com/arjan-bal), Google LLC
+- [arvindbr8](https://github.com/arvindbr8), Google LLC
+- [atollena](https://github.com/atollena), Datadog, Inc.
- [dfawley](https://github.com/dfawley), Google LLC
- [easwars](https://github.com/easwars), Google LLC
-- [menghanl](https://github.com/menghanl), Google LLC
-- [srini100](https://github.com/srini100), Google LLC
+- [erm-g](https://github.com/erm-g), Google LLC
+- [gtcooke94](https://github.com/gtcooke94), Google LLC
+- [purnesh42h](https://github.com/purnesh42h), Google LLC
+- [zasweq](https://github.com/zasweq), Google LLC
## Emeritus Maintainers (in alphabetical order)
-- [adelez](https://github.com/adelez), Google LLC
-- [canguler](https://github.com/canguler), Google LLC
-- [iamqizhao](https://github.com/iamqizhao), Google LLC
-- [jadekler](https://github.com/jadekler), Google LLC
-- [jtattermusch](https://github.com/jtattermusch), Google LLC
-- [lyuxuan](https://github.com/lyuxuan), Google LLC
-- [makmukhi](https://github.com/makmukhi), Google LLC
-- [matt-kwong](https://github.com/matt-kwong), Google LLC
-- [nicolasnoble](https://github.com/nicolasnoble), Google LLC
-- [yongni](https://github.com/yongni), Google LLC
+- [adelez](https://github.com/adelez)
+- [canguler](https://github.com/canguler)
+- [cesarghali](https://github.com/cesarghali)
+- [iamqizhao](https://github.com/iamqizhao)
+- [jeanbza](https://github.com/jeanbza)
+- [jtattermusch](https://github.com/jtattermusch)
+- [lyuxuan](https://github.com/lyuxuan)
+- [makmukhi](https://github.com/makmukhi)
+- [matt-kwong](https://github.com/matt-kwong)
+- [menghanl](https://github.com/menghanl)
+- [nicolasnoble](https://github.com/nicolasnoble)
+- [srini100](https://github.com/srini100)
+- [yongni](https://github.com/yongni)
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile
index 1f8960922..be38384ff 100644
--- a/vendor/google.golang.org/grpc/Makefile
+++ b/vendor/google.golang.org/grpc/Makefile
@@ -30,17 +30,20 @@ testdeps:
GO111MODULE=on go get -d -v -t google.golang.org/grpc/...
vet: vetdeps
- ./vet.sh
+ ./scripts/vet.sh
vetdeps:
- ./vet.sh -install
+ ./scripts/vet.sh -install
.PHONY: \
all \
build \
clean \
+ deps \
proto \
test \
+ testsubmodule \
testrace \
+ testdeps \
vet \
vetdeps
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index 1bc92248c..b572707c6 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -1,8 +1,8 @@
# gRPC-Go
-[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go)
[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API]
[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
+[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go)
The [Go][] implementation of [gRPC][]: A high performance, open source, general
RPC framework that puts mobile and HTTP/2 first. For more information see the
@@ -10,7 +10,7 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the
## Prerequisites
-- **[Go][]**: any one of the **three latest major** [releases][go-releases].
+- **[Go][]**: any one of the **two latest major** [releases][go-releases].
## Installation
diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md
index be6e10870..abab27937 100644
--- a/vendor/google.golang.org/grpc/SECURITY.md
+++ b/vendor/google.golang.org/grpc/SECURITY.md
@@ -1,3 +1,3 @@
# Security Policy
-For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
+For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go
index 712fef4d0..52d530d7a 100644
--- a/vendor/google.golang.org/grpc/attributes/attributes.go
+++ b/vendor/google.golang.org/grpc/attributes/attributes.go
@@ -121,9 +121,9 @@ func (a *Attributes) String() string {
return sb.String()
}
-func str(x any) string {
+func str(x any) (s string) {
if v, ok := x.(fmt.Stringer); ok {
- return v.String()
+ return fmt.Sprint(v)
} else if v, ok := x.(string); ok {
return v
}
diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go
index 0787d0b50..d7b40b7cb 100644
--- a/vendor/google.golang.org/grpc/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff/backoff.go
@@ -39,7 +39,7 @@ type Config struct {
MaxDelay time.Duration
}
-// DefaultConfig is a backoff configuration with the default values specfied
+// DefaultConfig is a backoff configuration with the default values specified
// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// This should be useful for callers who want to configure backoff with
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index b6377f445..3a2092f10 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -30,6 +30,8 @@ import (
"google.golang.org/grpc/channelz"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
+ estats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
@@ -39,6 +41,8 @@ import (
var (
// m is a map from name to balancer builder.
m = make(map[string]Builder)
+
+ logger = grpclog.Component("balancer")
)
// Register registers the balancer builder to the balancer map. b.Name
@@ -51,7 +55,14 @@ var (
// an init() function), and is not thread-safe. If multiple Balancers are
// registered with the same name, the one registered last will take effect.
func Register(b Builder) {
- m[strings.ToLower(b.Name())] = b
+ name := strings.ToLower(b.Name())
+ if name != b.Name() {
+ // TODO: Skip the use of strings.ToLower() to index the map after v1.59
+ // is released to switch to case sensitive balancer registry. Also,
+ // remove this warning and update the docstrings for Register and Get.
+ logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name())
+ }
+ m[name] = b
}
// unregisterForTesting deletes the balancer with the given name from the
@@ -62,14 +73,33 @@ func unregisterForTesting(name string) {
delete(m, name)
}
+// connectedAddress returns the connected address for a SubConnState. The
+// address is only valid if the state is READY.
+func connectedAddress(scs SubConnState) resolver.Address {
+ return scs.connectedAddress
+}
+
+// setConnectedAddress sets the connected address for a SubConnState.
+func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
+ scs.connectedAddress = addr
+}
+
func init() {
internal.BalancerUnregister = unregisterForTesting
+ internal.ConnectedAddress = connectedAddress
+ internal.SetConnectedAddress = setConnectedAddress
}
// Get returns the resolver builder registered with the given name.
// Note that the compare is done in a case-insensitive fashion.
// If no builder is register with the name, nil will be returned.
func Get(name string) Builder {
+ if strings.ToLower(name) != name {
+ // TODO: Skip the use of strings.ToLower() to index the map after v1.59
+ // is released to switch to case sensitive balancer registry. Also,
+ // remove this warning and update the docstrings for Register and Get.
+ logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name)
+ }
if b, ok := m[strings.ToLower(name)]; ok {
return b
}
@@ -100,7 +130,7 @@ type SubConn interface {
// UpdateAddresses updates the addresses used in this SubConn.
// gRPC checks if currently-connected address is still in the new list.
// If it's in the list, the connection will be kept.
- // If it's not in the list, the connection will gracefully closed, and
+ // If it's not in the list, the connection will gracefully close, and
// a new connection will be created.
//
// This will trigger a state transition for the SubConn.
@@ -112,8 +142,11 @@ type SubConn interface {
Connect()
// GetOrBuildProducer returns a reference to the existing Producer for this
// ProducerBuilder in this SubConn, or, if one does not currently exist,
- // creates a new one and returns it. Returns a close function which must
- // be called when the Producer is no longer needed.
+ // creates a new one and returns it. Returns a close function which may be
+ // called when the Producer is no longer needed. Otherwise the producer
+ // will automatically be closed upon connection loss or subchannel close.
+ // Should only be called on a SubConn in state Ready. Otherwise the
+ // producer will be unable to create streams.
GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
// Shutdown shuts down the SubConn gracefully. Any started RPCs will be
// allowed to complete. No future calls should be made on the SubConn.
@@ -217,8 +250,8 @@ type BuildOptions struct {
// implementations which do not communicate with a remote load balancer
// server can ignore this field.
Authority string
- // ChannelzParentID is the parent ClientConn's channelz ID.
- ChannelzParentID *channelz.Identifier
+ // ChannelzParent is the parent ClientConn's channelz channel.
+ ChannelzParent channelz.Identifier
// CustomUserAgent is the custom user agent set on the parent ClientConn.
// The balancer should set the same custom user agent if it creates a
// ClientConn.
@@ -227,6 +260,10 @@ type BuildOptions struct {
// same resolver.Target as passed to the resolver. See the documentation for
// the resolver.Target type for details about what it contains.
Target resolver.Target
+ // MetricsRecorder is the metrics recorder that balancers can use to record
+ // metrics. Balancer implementations which do not register metrics on
+ // metrics registry and record on them can ignore this field.
+ MetricsRecorder estats.MetricsRecorder
}
// Builder creates a balancer.
@@ -394,6 +431,9 @@ type SubConnState struct {
// ConnectionError is set if the ConnectivityState is TransientFailure,
// describing the reason the SubConn failed. Otherwise, it is nil.
ConnectionError error
+ // connectedAddress contains the connected address when ConnectivityState is
+ // Ready. Otherwise, it is indeterminate.
+ connectedAddress resolver.Address
}
// ClientConnState describes the state of a ClientConn relevant to the
@@ -415,8 +455,10 @@ type ProducerBuilder interface {
// Build creates a Producer. The first parameter is always a
// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
// associated SubConn), but is declared as `any` to avoid a dependency
- // cycle. Should also return a close function that will be called when all
- // references to the Producer have been given up.
+ // cycle. Build also returns a close function that will be called when all
+ // references to the Producer have been given up for a SubConn, or when a
+ // connectivity state change occurs on the SubConn. The close function
+ // should always block until all asynchronous cleanup work is completed.
Build(grpcClientConnInterface any) (p Producer, close func())
}
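With the warning added to Register and Get above, any builder whose Name() is not already all lowercase will log until the registry becomes case sensitive. A minimal sketch of the registration pattern this change anticipates follows; `exampleBuilder` and the `"example_lb"` name are hypothetical, and only the lowercase naming and init-time registration matter.

```go
package examplelb

import "google.golang.org/grpc/balancer"

// exampleBuilder is a stand-in Builder; a real implementation would return a
// working Balancer from Build.
type exampleBuilder struct{}

func (exampleBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
	return nil // placeholder; a real builder constructs its balancer here
}

// Name is already all lowercase, so the new Warningf in Register never fires
// and lookups keep working once the registry turns case sensitive.
func (exampleBuilder) Name() string { return "example_lb" }

func init() {
	// Registration happens from an init function, as the comment on Register
	// above expects.
	balancer.Register(exampleBuilder{})
}
```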
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index a7f1eeec8..d5ed172ae 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -36,7 +36,7 @@ type baseBuilder struct {
config Config
}
-func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
bal := &baseBalancer{
cc: cc,
pickerBuilder: bb.pickerBuilder,
@@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
}
}
// If resolver state contains no addresses, return an error so ClientConn
- // will trigger re-resolve. Also records this as an resolver error, so when
+ // will trigger re-resolve. Also records this as a resolver error, so when
// the overall state turns transient failure, the error message will have
// the zero address information.
if len(s.ResolverState.Addresses) == 0 {
@@ -259,6 +259,6 @@ type errPicker struct {
err error // Pick() always returns this err.
}
-func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
return balancer.PickResult{}, p.err
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
similarity index 71%
rename from vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
rename to vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
index b5568b22e..c51978945 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
@@ -1,9 +1,5 @@
-//go:build !linux
-// +build !linux
-
/*
- *
- * Copyright 2018 gRPC authors.
+ * Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,9 +15,10 @@
*
*/
-package channelz
+// Package internal contains code internal to the pickfirst package.
+package internal
+
+import "math/rand"
-// GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(c any) *SocketOptionData {
- return nil
-}
+// RandShuffle pseudo-randomizes the order of addresses.
+var RandShuffle = rand.Shuffle
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
similarity index 69%
rename from vendor/google.golang.org/grpc/pickfirst.go
rename to vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
index 2e9cf66b4..e069346a7 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
@@ -16,68 +16,65 @@
*
*/
-package grpc
+// Package pickfirst contains the pick_first load balancing policy.
+package pickfirst
import (
"encoding/json"
"errors"
"fmt"
+ "math/rand"
"google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/pickfirst/internal"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/envconfig"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
- "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
-)
-const (
- // PickFirstBalancerName is the name of the pick_first balancer.
- PickFirstBalancerName = "pick_first"
- logPrefix = "[pick-first-lb %p] "
+ _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required.
)
-func newPickfirstBuilder() balancer.Builder {
- return &pickfirstBuilder{}
+func init() {
+ if envconfig.NewPickFirstEnabled {
+ return
+ }
+ balancer.Register(pickfirstBuilder{})
}
+var logger = grpclog.Component("pick-first-lb")
+
+const (
+ // Name is the name of the pick_first balancer.
+ Name = "pick_first"
+ logPrefix = "[pick-first-lb %p] "
+)
+
type pickfirstBuilder struct{}
-func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
b := &pickfirstBalancer{cc: cc}
b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
return b
}
-func (*pickfirstBuilder) Name() string {
- return PickFirstBalancerName
+func (pickfirstBuilder) Name() string {
+ return Name
}
type pfConfig struct {
serviceconfig.LoadBalancingConfig `json:"-"`
// If set to true, instructs the LB policy to shuffle the order of the list
- // of addresses received from the name resolver before attempting to
+ // of endpoints received from the name resolver before attempting to
// connect to them.
ShuffleAddressList bool `json:"shuffleAddressList"`
}
-func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
- if !envconfig.PickFirstLBConfig {
- // Prior to supporting loadbalancing configuration, the pick_first LB
- // policy did not implement the balancer.ConfigParser interface. This
- // meant that if a non-empty configuration was passed to it, the service
- // config unmarshaling code would throw a warning log, but would
- // continue using the pick_first LB policy. The code below ensures the
- // same behavior is retained if the env var is not set.
- if string(js) != "{}" {
- logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js))
- }
- return nil, nil
- }
-
+func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
var cfg pfConfig
if err := json.Unmarshal(js, &cfg); err != nil {
return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
@@ -111,9 +108,17 @@ func (b *pickfirstBalancer) ResolverError(err error) {
})
}
+// Shuffler is an interface for shuffling an address list.
+type Shuffler interface {
+ ShuffleAddressListForTesting(n int, swap func(i, j int))
+}
+
+// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
+// is the number of elements. swap swaps the elements with indexes i and j.
+func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
+
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
- addrs := state.ResolverState.Addresses
- if len(addrs) == 0 {
+ if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
// The resolver reported an empty address list. Treat it like an error by
// calling b.ResolverError.
if b.subConn != nil {
@@ -125,22 +130,49 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
b.ResolverError(errors.New("produced zero addresses"))
return balancer.ErrBadResolverState
}
-
// We don't have to guard this block with the env var because ParseConfig
// already does so.
cfg, ok := state.BalancerConfig.(pfConfig)
if state.BalancerConfig != nil && !ok {
return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
}
- if cfg.ShuffleAddressList {
- addrs = append([]resolver.Address{}, addrs...)
- grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
- }
if b.logger.V(2) {
b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
}
+ var addrs []resolver.Address
+ if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+ // Perform the optional shuffling described in gRFC A62. The shuffling will
+ // change the order of endpoints but not touch the order of the addresses
+ // within each endpoint. - A61
+ if cfg.ShuffleAddressList {
+ endpoints = append([]resolver.Endpoint{}, endpoints...)
+ internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+ }
+
+ // "Flatten the list by concatenating the ordered list of addresses for each
+ // of the endpoints, in order." - A61
+ for _, endpoint := range endpoints {
+ // "In the flattened list, interleave addresses from the two address
+ // families, as per RFC-8305 section 4." - A61
+ // TODO: support the above language.
+ addrs = append(addrs, endpoint.Addresses...)
+ }
+ } else {
+ // Endpoints not set, process addresses until we migrate resolver
+ // emissions fully to Endpoints. The top channel does wrap emitted
+ // addresses with endpoints, however some balancers such as weighted
+ // target do not forward the corresponding correct endpoints down/split
+ // endpoints properly. Once all balancers correctly forward endpoints
+ // down, can delete this else conditional.
+ addrs = state.ResolverState.Addresses
+ if cfg.ShuffleAddressList {
+ addrs = append([]resolver.Address{}, addrs...)
+ rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
+ }
+ }
+
if b.subConn != nil {
b.cc.UpdateAddresses(b.subConn, addrs)
return nil
@@ -257,7 +289,3 @@ func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
i.subConn.Connect()
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
-
-func init() {
- balancer.Register(newPickfirstBuilder())
-}
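The `shuffleAddressList` knob parsed by `ParseConfig` above is normally driven through service config. A minimal sketch of a client opting in via a default service config, assuming a grpc-go version that provides `grpc.NewClient`; the target and the use of insecure credentials are illustrative only.

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// The JSON shape matches the pfConfig struct parsed by ParseConfig.
	conn, err := grpc.NewClient(
		"dns:///example.test:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"pick_first": {"shuffleAddressList": true}}]}`),
	)
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer conn.Close()
}
```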
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
new file mode 100644
index 000000000..985b6edc7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
@@ -0,0 +1,625 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pickfirstleaf contains the pick_first load balancing policy which
+// will be the universal leaf policy after dualstack changes are implemented.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package pickfirstleaf
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "sync"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/pickfirst/internal"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/envconfig"
+ internalgrpclog "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/internal/pretty"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+func init() {
+ if envconfig.NewPickFirstEnabled {
+ // Register as the default pick_first balancer.
+ Name = "pick_first"
+ }
+ balancer.Register(pickfirstBuilder{})
+}
+
+var (
+ logger = grpclog.Component("pick-first-leaf-lb")
+ // Name is the name of the pick_first_leaf balancer.
+ // It is changed to "pick_first" in init() if this balancer is to be
+ // registered as the default pickfirst.
+ Name = "pick_first_leaf"
+)
+
+// TODO: change to pick-first when this becomes the default pick_first policy.
+const logPrefix = "[pick-first-leaf-lb %p] "
+
+type pickfirstBuilder struct{}
+
+func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
+ b := &pickfirstBalancer{
+ cc: cc,
+ addressList: addressList{},
+ subConns: resolver.NewAddressMap(),
+ state: connectivity.Connecting,
+ mu: sync.Mutex{},
+ }
+ b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
+ return b
+}
+
+func (b pickfirstBuilder) Name() string {
+ return Name
+}
+
+func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+ var cfg pfConfig
+ if err := json.Unmarshal(js, &cfg); err != nil {
+ return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+ }
+ return cfg, nil
+}
+
+type pfConfig struct {
+ serviceconfig.LoadBalancingConfig `json:"-"`
+
+ // If set to true, instructs the LB policy to shuffle the order of the list
+ // of endpoints received from the name resolver before attempting to
+ // connect to them.
+ ShuffleAddressList bool `json:"shuffleAddressList"`
+}
+
+// scData keeps track of the current state of the subConn.
+// It is not safe for concurrent access.
+type scData struct {
+ // The following fields are initialized at build time and read-only after
+ // that.
+ subConn balancer.SubConn
+ addr resolver.Address
+
+ state connectivity.State
+ lastErr error
+}
+
+func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
+ sd := &scData{
+ state: connectivity.Idle,
+ addr: addr,
+ }
+ sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
+ StateListener: func(state balancer.SubConnState) {
+ b.updateSubConnState(sd, state)
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ sd.subConn = sc
+ return sd, nil
+}
+
+type pickfirstBalancer struct {
+ // The following fields are initialized at build time and read-only after
+ // that and therefore do not need to be guarded by a mutex.
+ logger *internalgrpclog.PrefixLogger
+ cc balancer.ClientConn
+
+ // The mutex is used to ensure synchronization of updates triggered
+ // from the idle picker and the already serialized resolver,
+ // SubConn state updates.
+ mu sync.Mutex
+ state connectivity.State
+ // scData for active subconns mapped by address.
+ subConns *resolver.AddressMap
+ addressList addressList
+ firstPass bool
+ numTF int
+}
+
+// ResolverError is called by the ClientConn when the name resolver produces
+// an error or when pickfirst determines the resolver update to be invalid.
+func (b *pickfirstBalancer) ResolverError(err error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.resolverErrorLocked(err)
+}
+
+func (b *pickfirstBalancer) resolverErrorLocked(err error) {
+ if b.logger.V(2) {
+ b.logger.Infof("Received error from the name resolver: %v", err)
+ }
+
+ // The picker will not change since the balancer does not currently
+ // report an error. If the balancer hasn't received a single good resolver
+ // update yet, transition to TRANSIENT_FAILURE.
+ if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
+ if b.logger.V(2) {
+ b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
+ }
+ return
+ }
+
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
+ })
+}
+
+func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
+ // Cleanup state pertaining to the previous resolver state.
+ // Treat an empty address list like an error by calling b.ResolverError.
+ b.state = connectivity.TransientFailure
+ b.closeSubConnsLocked()
+ b.addressList.updateAddrs(nil)
+ b.resolverErrorLocked(errors.New("produced zero addresses"))
+ return balancer.ErrBadResolverState
+ }
+ cfg, ok := state.BalancerConfig.(pfConfig)
+ if state.BalancerConfig != nil && !ok {
+ return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
+ }
+
+ if b.logger.V(2) {
+ b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
+ }
+
+ var newAddrs []resolver.Address
+ if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+ // Perform the optional shuffling described in gRFC A62. The shuffling
+ // will change the order of endpoints but not touch the order of the
+ // addresses within each endpoint. - A61
+ if cfg.ShuffleAddressList {
+ endpoints = append([]resolver.Endpoint{}, endpoints...)
+ internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+ }
+
+ // "Flatten the list by concatenating the ordered list of addresses for
+ // each of the endpoints, in order." - A61
+ for _, endpoint := range endpoints {
+ // "In the flattened list, interleave addresses from the two address
+ // families, as per RFC-8305 section 4." - A61
+ // TODO: support the above language.
+ newAddrs = append(newAddrs, endpoint.Addresses...)
+ }
+ } else {
+ // Endpoints are not set; process addresses until resolver emissions are
+ // fully migrated to Endpoints. The top channel does wrap emitted
+ // addresses with endpoints, but some balancers, such as weighted
+ // target, do not forward the corresponding endpoints down or split
+ // endpoints properly. Once all balancers correctly forward endpoints,
+ // this else branch can be deleted.
+ newAddrs = state.ResolverState.Addresses
+ if cfg.ShuffleAddressList {
+ newAddrs = append([]resolver.Address{}, newAddrs...)
+ internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
+ }
+ }
+
+ // If an address appears in multiple endpoints or in the same endpoint
+ // multiple times, we keep it only once. We will create only one SubConn
+ // for the address because an AddressMap is used to store SubConns.
+ // Not de-duplicating would result in attempting to connect to the same
+ // SubConn multiple times in the same pass. We don't want this.
+ newAddrs = deDupAddresses(newAddrs)
+
+ // Since we have a new set of addresses, we are again at first pass.
+ b.firstPass = true
+
+ // If the previously READY SubConn exists in the new address list,
+ // keep that connection and do not create new SubConns.
+ prevAddr := b.addressList.currentAddress()
+ prevAddrsCount := b.addressList.size()
+ b.addressList.updateAddrs(newAddrs)
+ if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) {
+ return nil
+ }
+
+ b.reconcileSubConnsLocked(newAddrs)
+ // If it's the first resolver update or the balancer was already READY
+ // (but the new address list does not contain the ready SubConn) or
+ // CONNECTING, enter CONNECTING.
+ // We may be in TRANSIENT_FAILURE due to a previous empty address list;
+ // we should still enter CONNECTING because the sticky TF behaviour
+ // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
+ // due to connectivity failures.
+ if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 {
+ // Start connection attempt at first address.
+ b.state = connectivity.Connecting
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ b.requestConnectionLocked()
+ } else if b.state == connectivity.TransientFailure {
+ // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
+ // we're READY. See A62.
+ b.requestConnectionLocked()
+ }
+ return nil
+}
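
To make the flattening above concrete, a small hedged sketch (addresses hypothetical): two endpoints with two and one addresses respectively produce a single ordered list of three addresses, which is then de-duplicated and becomes the new addressList.

```go
// Hypothetical resolver update; per the logic above the flattened order is:
// 10.0.0.1:443, 10.0.0.2:443, 10.0.1.1:443.
update := resolver.State{
	Endpoints: []resolver.Endpoint{
		{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}}},
		{Addresses: []resolver.Address{{Addr: "10.0.1.1:443"}}},
	},
}
_ = update // handed to the balancer as balancer.ClientConnState{ResolverState: update}
```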
+
+// UpdateSubConnState is unused as a StateListener is always registered when
+// creating SubConns.
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+ b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
+}
+
+func (b *pickfirstBalancer) Close() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.closeSubConnsLocked()
+ b.state = connectivity.Shutdown
+}
+
+// ExitIdle moves the balancer out of idle state. It can be called concurrently
+// by the idlePicker and clientConn so access to variables should be
+// synchronized.
+func (b *pickfirstBalancer) ExitIdle() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() {
+ b.firstPass = true
+ b.requestConnectionLocked()
+ }
+}
+
+func (b *pickfirstBalancer) closeSubConnsLocked() {
+ for _, sd := range b.subConns.Values() {
+ sd.(*scData).subConn.Shutdown()
+ }
+ b.subConns = resolver.NewAddressMap()
+}
+
+// deDupAddresses ensures that each address appears only once in the slice.
+func deDupAddresses(addrs []resolver.Address) []resolver.Address {
+ seenAddrs := resolver.NewAddressMap()
+ retAddrs := []resolver.Address{}
+
+ for _, addr := range addrs {
+ if _, ok := seenAddrs.Get(addr); ok {
+ continue
+ }
+ seenAddrs.Set(addr, true)
+ retAddrs = append(retAddrs, addr)
+ }
+ return retAddrs
+}
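
A quick in-package sketch of the intended behaviour (addresses hypothetical): duplicates are dropped and first-occurrence order is preserved.

```go
in := []resolver.Address{{Addr: "a:1"}, {Addr: "b:2"}, {Addr: "a:1"}}
out := deDupAddresses(in)
// out contains two entries: "a:1" followed by "b:2".
```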
+
+// reconcileSubConnsLocked updates the active subchannels based on a new address
+// list from the resolver. It does this by:
+// - closing subchannels: any existing subchannels associated with addresses
+// that are no longer in the updated list are shut down.
+// - removing subchannels: entries for these closed subchannels are removed
+// from the subchannel map.
+//
+// This ensures that the subchannel map accurately reflects the current set of
+// addresses received from the name resolver.
+func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
+ newAddrsMap := resolver.NewAddressMap()
+ for _, addr := range newAddrs {
+ newAddrsMap.Set(addr, true)
+ }
+
+ for _, oldAddr := range b.subConns.Keys() {
+ if _, ok := newAddrsMap.Get(oldAddr); ok {
+ continue
+ }
+ val, _ := b.subConns.Get(oldAddr)
+ val.(*scData).subConn.Shutdown()
+ b.subConns.Delete(oldAddr)
+ }
+}
+
+// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
+// becomes READY, which means that all other SubConns must be shut down.
+func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
+ for _, v := range b.subConns.Values() {
+ sd := v.(*scData)
+ if sd.subConn != selected.subConn {
+ sd.subConn.Shutdown()
+ }
+ }
+ b.subConns = resolver.NewAddressMap()
+ b.subConns.Set(selected.addr, selected)
+}
+
+// requestConnectionLocked starts connecting on the subchannel corresponding to
+// the current address. If no subchannel exists, one is created. If the current
+// subchannel is in TransientFailure, a connection to the next address is
+// attempted until a subchannel is found.
+func (b *pickfirstBalancer) requestConnectionLocked() {
+ if !b.addressList.isValid() {
+ return
+ }
+ var lastErr error
+ for valid := true; valid; valid = b.addressList.increment() {
+ curAddr := b.addressList.currentAddress()
+ sd, ok := b.subConns.Get(curAddr)
+ if !ok {
+ var err error
+ // We want to assign the new scData to sd from the outer scope,
+ // hence we can't use := below.
+ sd, err = b.newSCData(curAddr)
+ if err != nil {
+ // This should never happen, unless the clientConn is being shut
+ // down.
+ if b.logger.V(2) {
+ b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
+ }
+ // Do nothing, the LB policy will be closed soon.
+ return
+ }
+ b.subConns.Set(curAddr, sd)
+ }
+
+ scd := sd.(*scData)
+ switch scd.state {
+ case connectivity.Idle:
+ scd.subConn.Connect()
+ case connectivity.TransientFailure:
+ // Try the next address.
+ lastErr = scd.lastErr
+ continue
+ case connectivity.Ready:
+ // Should never happen.
+ b.logger.Errorf("Requesting a connection even though we have a READY SubConn")
+ case connectivity.Shutdown:
+ // Should never happen.
+ b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map")
+ case connectivity.Connecting:
+ // Wait for the SubConn to report success or failure.
+ }
+ return
+ }
+ // All the remaining addresses in the list are in TRANSIENT_FAILURE; end the
+ // first pass.
+ b.endFirstPassLocked(lastErr)
+}
+
+func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ oldState := sd.state
+ sd.state = newState.ConnectivityState
+ // Previously relevant SubConns can still callback with state updates.
+ // To prevent pickers from returning these obsolete SubConns, this logic
+ // is included to check if the current list of active SubConns includes this
+ // SubConn.
+ if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd {
+ return
+ }
+ if newState.ConnectivityState == connectivity.Shutdown {
+ return
+ }
+
+ if newState.ConnectivityState == connectivity.Ready {
+ b.shutdownRemainingLocked(sd)
+ if !b.addressList.seekTo(sd.addr) {
+ // This should not fail as we should have only one SubConn after
+ // entering READY. The SubConn should be present in the addressList.
+ b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses)
+ return
+ }
+ b.state = connectivity.Ready
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.Ready,
+ Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+ })
+ return
+ }
+
+ // If the LB policy is READY, and it receives a subchannel state change,
+ // it means that the READY subchannel has failed.
+ // A SubConn can also transition from CONNECTING directly to IDLE when
+ // a transport is successfully created, but the connection fails
+ // before the SubConn can send the notification for READY. We treat
+ // this as a successful connection and transition to IDLE.
+ if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
+ // Once a transport fails, the balancer enters IDLE and starts from
+ // the first address when the picker is used.
+ b.shutdownRemainingLocked(sd)
+ b.state = connectivity.Idle
+ b.addressList.reset()
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.Idle,
+ Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
+ })
+ return
+ }
+
+ if b.firstPass {
+ switch newState.ConnectivityState {
+ case connectivity.Connecting:
+ // The balancer can be in either IDLE, CONNECTING or
+ // TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in
+ // TRANSIENT_FAILURE until it's READY. See A62.
+ // If the balancer is already in CONNECTING, no update is needed.
+ if b.state == connectivity.Idle {
+ b.state = connectivity.Connecting
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ }
+ case connectivity.TransientFailure:
+ sd.lastErr = newState.ConnectionError
+ // Since we're re-using common SubConns while handling resolver
+ // updates, we could receive an out of turn TRANSIENT_FAILURE from
+ // a pass over the previous address list. We ignore such updates.
+
+ if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
+ return
+ }
+ if b.addressList.increment() {
+ b.requestConnectionLocked()
+ return
+ }
+ // End of the first pass.
+ b.endFirstPassLocked(newState.ConnectionError)
+ }
+ return
+ }
+
+ // We have finished the first pass, keep re-connecting failing SubConns.
+ switch newState.ConnectivityState {
+ case connectivity.TransientFailure:
+ b.numTF = (b.numTF + 1) % b.subConns.Len()
+ sd.lastErr = newState.ConnectionError
+ if b.numTF%b.subConns.Len() == 0 {
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: newState.ConnectionError},
+ })
+ }
+ // We don't need to request re-resolution since the SubConn already
+ // does that before reporting TRANSIENT_FAILURE.
+ // TODO: #7534 - Move re-resolution requests from SubConn into
+ // pick_first.
+ case connectivity.Idle:
+ sd.subConn.Connect()
+ }
+}
+
+func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) {
+ b.firstPass = false
+ b.numTF = 0
+ b.state = connectivity.TransientFailure
+
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: lastErr},
+ })
+ // Start re-connecting all the SubConns that are already in IDLE.
+ for _, v := range b.subConns.Values() {
+ sd := v.(*scData)
+ if sd.state == connectivity.Idle {
+ sd.subConn.Connect()
+ }
+ }
+}
+
+type picker struct {
+ result balancer.PickResult
+ err error
+}
+
+func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+ return p.result, p.err
+}
+
+// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
+// CONNECTING when Pick is called.
+type idlePicker struct {
+ exitIdle func()
+}
+
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+ i.exitIdle()
+ return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
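
A hedged sketch of how the channel consumes these two pickers (the surrounding retry loop is simplified gRPC internals and currentPicker is a hypothetical variable): a plain picker carrying ErrNoSubConnAvailable queues the RPC until a new picker is published, while idlePicker additionally kicks the balancer out of IDLE exactly once, thanks to sync.OnceFunc.

```go
res, err := currentPicker.Pick(balancer.PickInfo{FullMethodName: "/pkg.Service/Method"})
if err == balancer.ErrNoSubConnAvailable {
	// The RPC waits for the next picker update; for idlePicker this Pick has
	// already triggered ExitIdle, so a connection attempt is now in flight.
}
_ = res
```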
+
+// addressList manages sequentially iterating over addresses present in a list
+// of endpoints. It provides a 1 dimensional view of the addresses present in
+// the endpoints.
+// This type is not safe for concurrent access.
+type addressList struct {
+ addresses []resolver.Address
+ idx int
+}
+
+func (al *addressList) isValid() bool {
+ return al.idx < len(al.addresses)
+}
+
+func (al *addressList) size() int {
+ return len(al.addresses)
+}
+
+// increment moves to the next index in the address list.
+// This method returns false if it went off the list, true otherwise.
+func (al *addressList) increment() bool {
+ if !al.isValid() {
+ return false
+ }
+ al.idx++
+ return al.idx < len(al.addresses)
+}
+
+// currentAddress returns the current address pointed to in the addressList.
+// If the list is in an invalid state, it returns an empty address instead.
+func (al *addressList) currentAddress() resolver.Address {
+ if !al.isValid() {
+ return resolver.Address{}
+ }
+ return al.addresses[al.idx]
+}
+
+// first returns the first address in the list. If the list is empty, it returns
+// an empty address instead.
+func (al *addressList) first() resolver.Address {
+ if len(al.addresses) == 0 {
+ return resolver.Address{}
+ }
+ return al.addresses[0]
+}
+
+func (al *addressList) reset() {
+ al.idx = 0
+}
+
+func (al *addressList) updateAddrs(addrs []resolver.Address) {
+ al.addresses = addrs
+ al.reset()
+}
+
+// seekTo returns false if the needle was not found, leaving the current index
+// unchanged.
+func (al *addressList) seekTo(needle resolver.Address) bool {
+ for ai, addr := range al.addresses {
+ if !equalAddressIgnoringBalAttributes(&addr, &needle) {
+ continue
+ }
+ al.idx = ai
+ return true
+ }
+ return false
+}
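
An in-package, test-style sketch of the cursor semantics (addresses hypothetical): increment reports whether the cursor is still on the list, and a failed seekTo leaves the index where it was.

```go
al := addressList{}
al.updateAddrs([]resolver.Address{{Addr: "a:1"}, {Addr: "b:2"}})

_ = al.currentAddress() // a:1
_ = al.increment()      // true: cursor now on b:2
_ = al.increment()      // false: walked off the end, isValid() is now false

al.reset()
_ = al.seekTo(resolver.Address{Addr: "b:2"}) // true: cursor moves to index 1
_ = al.seekTo(resolver.Address{Addr: "c:3"}) // false: cursor stays at index 1
```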
+
+// equalAddressIgnoringBalAttributes returns true if a and b are considered
+// equal. This is different from the Equal method on the resolver.Address type
+// which considers all fields to determine equality. Here, we only consider
+// fields that are meaningful to the SubConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+ return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+ a.Attributes.Equal(b.Attributes) &&
+ a.Metadata == b.Metadata
+}
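
A short hedged sketch of why this comparison exists (the attribute key and value are hypothetical, and the snippet assumes the google.golang.org/grpc/attributes import): two addresses that differ only in BalancerAttributes are distinct for resolver.Address.Equal but identical as far as a SubConn is concerned.

```go
a := resolver.Address{Addr: "10.0.0.1:443"}
b := resolver.Address{Addr: "10.0.0.1:443", BalancerAttributes: attributes.New("weight", 3)}

_ = a.Equal(b)                                // false: BalancerAttributes differ
_ = equalAddressIgnoringBalAttributes(&a, &b) // true: only SubConn-relevant fields compared
```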
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index f7031ad22..260255d31 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -22,12 +22,12 @@
package roundrobin
import (
+ "math/rand"
"sync/atomic"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/grpcrand"
)
// Name is the name of round_robin balancer.
@@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
// Start at a random index, as the same RR balancer rebuilds a new
// picker when SubConn states change, and we don't want to apply excess
// load to the first server in the list.
- next: uint32(grpcrand.Intn(len(scs))),
+ next: uint32(rand.Intn(len(scs))),
}
}
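
The swap from the internal grpcrand package to math/rand works because, since Go 1.20, the global math/rand source is seeded automatically. A hedged sketch of the same start-offset idea, with readySubConns standing in as a hypothetical helper rather than anything from this file:

```go
scs := readySubConns()              // hypothetical slice of READY SubConns
next := uint32(rand.Intn(len(scs))) // random starting offset; no explicit Seed needed on Go 1.20+
sc := scs[int(next)%len(scs)]       // subsequent picks advance next atomically, as above
_ = sc
```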
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
deleted file mode 100644
index a4411c22b..000000000
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
+++ /dev/null
@@ -1,454 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
- "fmt"
- "strings"
- "sync"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/internal/balancer/gracefulswitch"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcsync"
- "google.golang.org/grpc/resolver"
-)
-
-type ccbMode int
-
-const (
- ccbModeActive = iota
- ccbModeIdle
- ccbModeClosed
- ccbModeExitingIdle
-)
-
-// ccBalancerWrapper sits between the ClientConn and the Balancer.
-//
-// ccBalancerWrapper implements methods corresponding to the ones on the
-// balancer.Balancer interface. The ClientConn is free to call these methods
-// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
-// to the Balancer happen synchronously and in order.
-//
-// ccBalancerWrapper also implements the balancer.ClientConn interface and is
-// passed to the Balancer implementations. It invokes unexported methods on the
-// ClientConn to handle these calls from the Balancer.
-//
-// It uses the gracefulswitch.Balancer internally to ensure that balancer
-// switches happen in a graceful manner.
-type ccBalancerWrapper struct {
- // The following fields are initialized when the wrapper is created and are
- // read-only afterwards, and therefore can be accessed without a mutex.
- cc *ClientConn
- opts balancer.BuildOptions
-
- // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a
- // mutually exclusive manner as they are scheduled in the serializer. Fields
- // accessed *only* in these serializer callbacks, can therefore be accessed
- // without a mutex.
- balancer *gracefulswitch.Balancer
- curBalancerName string
-
- // mu guards access to the below fields. Access to the serializer and its
- // cancel function needs to be mutex protected because they are overwritten
- // when the wrapper exits idle mode.
- mu sync.Mutex
- serializer *grpcsync.CallbackSerializer // To serialize all outoing calls.
- serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time.
- mode ccbMode // Tracks the current mode of the wrapper.
-}
-
-// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
-// is not created until the switchTo() method is invoked.
-func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper {
- ctx, cancel := context.WithCancel(context.Background())
- ccb := &ccBalancerWrapper{
- cc: cc,
- opts: bopts,
- serializer: grpcsync.NewCallbackSerializer(ctx),
- serializerCancel: cancel,
- }
- ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts)
- return ccb
-}
-
-// updateClientConnState is invoked by grpc to push a ClientConnState update to
-// the underlying balancer.
-func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
- ccb.mu.Lock()
- errCh := make(chan error, 1)
- // Here and everywhere else where Schedule() is called, it is done with the
- // lock held. But the lock guards only the scheduling part. The actual
- // callback is called asynchronously without the lock being held.
- ok := ccb.serializer.Schedule(func(_ context.Context) {
- errCh <- ccb.balancer.UpdateClientConnState(*ccs)
- })
- if !ok {
- // If we are unable to schedule a function with the serializer, it
- // indicates that it has been closed. A serializer is only closed when
- // the wrapper is closed or is in idle.
- ccb.mu.Unlock()
- return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer")
- }
- ccb.mu.Unlock()
-
- // We get here only if the above call to Schedule succeeds, in which case it
- // is guaranteed that the scheduled function will run. Therefore it is safe
- // to block on this channel.
- err := <-errCh
- if logger.V(2) && err != nil {
- logger.Infof("error from balancer.UpdateClientConnState: %v", err)
- }
- return err
-}
-
-// updateSubConnState is invoked by grpc to push a subConn state update to the
-// underlying balancer.
-func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) {
- ccb.mu.Lock()
- ccb.serializer.Schedule(func(_ context.Context) {
- // Even though it is optional for balancers, gracefulswitch ensures
- // opts.StateListener is set, so this cannot ever be nil.
- sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
- })
- ccb.mu.Unlock()
-}
-
-func (ccb *ccBalancerWrapper) resolverError(err error) {
- ccb.mu.Lock()
- ccb.serializer.Schedule(func(_ context.Context) {
- ccb.balancer.ResolverError(err)
- })
- ccb.mu.Unlock()
-}
-
-// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the
-// LB policy identified by name.
-//
-// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the
-// first good update from the name resolver, it determines the LB policy to use
-// and invokes the switchTo() method. Upon receipt of every subsequent update
-// from the name resolver, it invokes this method.
-//
-// the ccBalancerWrapper keeps track of the current LB policy name, and skips
-// the graceful balancer switching process if the name does not change.
-func (ccb *ccBalancerWrapper) switchTo(name string) {
- ccb.mu.Lock()
- ccb.serializer.Schedule(func(_ context.Context) {
- // TODO: Other languages use case-sensitive balancer registries. We should
- // switch as well. See: https://github.com/grpc/grpc-go/issues/5288.
- if strings.EqualFold(ccb.curBalancerName, name) {
- return
- }
- ccb.buildLoadBalancingPolicy(name)
- })
- ccb.mu.Unlock()
-}
-
-// buildLoadBalancingPolicy performs the following:
-// - retrieve a balancer builder for the given name. Use the default LB
-// policy, pick_first, if no LB policy with name is found in the registry.
-// - instruct the gracefulswitch balancer to switch to the above builder. This
-// will actually build the new balancer.
-// - update the `curBalancerName` field
-//
-// Must be called from a serializer callback.
-func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) {
- builder := balancer.Get(name)
- if builder == nil {
- channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
- builder = newPickfirstBuilder()
- } else {
- channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name)
- }
-
- if err := ccb.balancer.SwitchTo(builder); err != nil {
- channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err)
- return
- }
- ccb.curBalancerName = builder.Name()
-}
-
-func (ccb *ccBalancerWrapper) close() {
- channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing")
- ccb.closeBalancer(ccbModeClosed)
-}
-
-// enterIdleMode is invoked by grpc when the channel enters idle mode upon
-// expiry of idle_timeout. This call blocks until the balancer is closed.
-func (ccb *ccBalancerWrapper) enterIdleMode() {
- channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode")
- ccb.closeBalancer(ccbModeIdle)
-}
-
-// closeBalancer is invoked when the channel is being closed or when it enters
-// idle mode upon expiry of idle_timeout.
-func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
- ccb.mu.Lock()
- if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle {
- ccb.mu.Unlock()
- return
- }
-
- ccb.mode = m
- done := ccb.serializer.Done()
- b := ccb.balancer
- ok := ccb.serializer.Schedule(func(_ context.Context) {
- // Close the serializer to ensure that no more calls from gRPC are sent
- // to the balancer.
- ccb.serializerCancel()
- // Empty the current balancer name because we don't have a balancer
- // anymore and also so that we act on the next call to switchTo by
- // creating a new balancer specified by the new resolver.
- ccb.curBalancerName = ""
- })
- if !ok {
- ccb.mu.Unlock()
- return
- }
- ccb.mu.Unlock()
-
- // Give enqueued callbacks a chance to finish before closing the balancer.
- <-done
- b.Close()
-}
-
-// exitIdleMode is invoked by grpc when the channel exits idle mode either
-// because of an RPC or because of an invocation of the Connect() API. This
-// recreates the balancer that was closed previously when entering idle mode.
-//
-// If the channel is not in idle mode, we know for a fact that we are here as a
-// result of the user calling the Connect() method on the ClientConn. In this
-// case, we can simply forward the call to the underlying balancer, instructing
-// it to reconnect to the backends.
-func (ccb *ccBalancerWrapper) exitIdleMode() {
- ccb.mu.Lock()
- if ccb.mode == ccbModeClosed {
- // Request to exit idle is a no-op when wrapper is already closed.
- ccb.mu.Unlock()
- return
- }
-
- if ccb.mode == ccbModeIdle {
- // Recreate the serializer which was closed when we entered idle.
- ctx, cancel := context.WithCancel(context.Background())
- ccb.serializer = grpcsync.NewCallbackSerializer(ctx)
- ccb.serializerCancel = cancel
- }
-
- // The ClientConn guarantees that mutual exclusion between close() and
- // exitIdleMode(), and since we just created a new serializer, we can be
- // sure that the below function will be scheduled.
- done := make(chan struct{})
- ccb.serializer.Schedule(func(_ context.Context) {
- defer close(done)
-
- ccb.mu.Lock()
- defer ccb.mu.Unlock()
-
- if ccb.mode != ccbModeIdle {
- ccb.balancer.ExitIdle()
- return
- }
-
- // Gracefulswitch balancer does not support a switchTo operation after
- // being closed. Hence we need to create a new one here.
- ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
- ccb.mode = ccbModeActive
- channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode")
-
- })
- ccb.mu.Unlock()
-
- <-done
-}
-
-func (ccb *ccBalancerWrapper) isIdleOrClosed() bool {
- ccb.mu.Lock()
- defer ccb.mu.Unlock()
- return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed
-}
-
-func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
- if ccb.isIdleOrClosed() {
- return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle")
- }
-
- if len(addrs) == 0 {
- return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
- }
- ac, err := ccb.cc.newAddrConn(addrs, opts)
- if err != nil {
- channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
- return nil, err
- }
- acbw := &acBalancerWrapper{
- ccb: ccb,
- ac: ac,
- producers: make(map[balancer.ProducerBuilder]*refCountedProducer),
- stateListener: opts.StateListener,
- }
- ac.acbw = acbw
- return acbw, nil
-}
-
-func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
- // The graceful switch balancer will never call this.
- logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc")
-}
-
-func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
- if ccb.isIdleOrClosed() {
- return
- }
-
- acbw, ok := sc.(*acBalancerWrapper)
- if !ok {
- return
- }
- acbw.UpdateAddresses(addrs)
-}
-
-func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
- if ccb.isIdleOrClosed() {
- return
- }
-
- // Update picker before updating state. Even though the ordering here does
- // not matter, it can lead to multiple calls of Pick in the common start-up
- // case where we wait for ready and then perform an RPC. If the picker is
- // updated later, we could call the "connecting" picker when the state is
- // updated, and then call the "ready" picker after the picker gets updated.
- ccb.cc.blockingpicker.updatePicker(s.Picker)
- ccb.cc.csMgr.updateState(s.ConnectivityState)
-}
-
-func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
- if ccb.isIdleOrClosed() {
- return
- }
-
- ccb.cc.resolveNow(o)
-}
-
-func (ccb *ccBalancerWrapper) Target() string {
- return ccb.cc.target
-}
-
-// acBalancerWrapper is a wrapper on top of ac for balancers.
-// It implements balancer.SubConn interface.
-type acBalancerWrapper struct {
- ac *addrConn // read-only
- ccb *ccBalancerWrapper // read-only
- stateListener func(balancer.SubConnState)
-
- mu sync.Mutex
- producers map[balancer.ProducerBuilder]*refCountedProducer
-}
-
-func (acbw *acBalancerWrapper) String() string {
- return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int())
-}
-
-func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
- acbw.ac.updateAddrs(addrs)
-}
-
-func (acbw *acBalancerWrapper) Connect() {
- go acbw.ac.connect()
-}
-
-func (acbw *acBalancerWrapper) Shutdown() {
- ccb := acbw.ccb
- if ccb.isIdleOrClosed() {
- // It it safe to ignore this call when the balancer is closed or in idle
- // because the ClientConn takes care of closing the connections.
- //
- // Not returning early from here when the balancer is closed or in idle
- // leads to a deadlock though, because of the following sequence of
- // calls when holding cc.mu:
- // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
- // ccb.RemoveAddrConn --> cc.removeAddrConn
- return
- }
-
- ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
-}
-
-// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
-// ready, blocks until it is or ctx expires. Returns an error when the context
-// expires or the addrConn is shut down.
-func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
- transport, err := acbw.ac.getTransport(ctx)
- if err != nil {
- return nil, err
- }
- return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
-}
-
-// Invoke performs a unary RPC. If the addrConn is not ready, returns
-// errSubConnNotReady.
-func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error {
- cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
- if err != nil {
- return err
- }
- if err := cs.SendMsg(args); err != nil {
- return err
- }
- return cs.RecvMsg(reply)
-}
-
-type refCountedProducer struct {
- producer balancer.Producer
- refs int // number of current refs to the producer
- close func() // underlying producer's close function
-}
-
-func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
- acbw.mu.Lock()
- defer acbw.mu.Unlock()
-
- // Look up existing producer from this builder.
- pData := acbw.producers[pb]
- if pData == nil {
- // Not found; create a new one and add it to the producers map.
- p, close := pb.Build(acbw)
- pData = &refCountedProducer{producer: p, close: close}
- acbw.producers[pb] = pData
- }
- // Account for this new reference.
- pData.refs++
-
- // Return a cleanup function wrapped in a OnceFunc to remove this reference
- // and delete the refCountedProducer from the map if the total reference
- // count goes to zero.
- unref := func() {
- acbw.mu.Lock()
- pData.refs--
- if pData.refs == 0 {
- defer pData.close() // Run outside the acbw mutex
- delete(acbw.producers, pb)
- }
- acbw.mu.Unlock()
- }
- return pData.producer, grpcsync.OnceFunc(unref)
-}
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
new file mode 100644
index 000000000..2a4f2878a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -0,0 +1,375 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/internal/balancer/gracefulswitch"
+ "google.golang.org/grpc/internal/channelz"
+ "google.golang.org/grpc/internal/grpcsync"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/status"
+)
+
+var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+
+// ccBalancerWrapper sits between the ClientConn and the Balancer.
+//
+// ccBalancerWrapper implements methods corresponding to the ones on the
+// balancer.Balancer interface. The ClientConn is free to call these methods
+// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
+// to the Balancer happen in order by performing them in the serializer, without
+// any mutexes held.
+//
+// ccBalancerWrapper also implements the balancer.ClientConn interface and is
+// passed to the Balancer implementations. It invokes unexported methods on the
+// ClientConn to handle these calls from the Balancer.
+//
+// It uses the gracefulswitch.Balancer internally to ensure that balancer
+// switches happen in a graceful manner.
+type ccBalancerWrapper struct {
+ // The following fields are initialized when the wrapper is created and are
+ // read-only afterwards, and therefore can be accessed without a mutex.
+ cc *ClientConn
+ opts balancer.BuildOptions
+ serializer *grpcsync.CallbackSerializer
+ serializerCancel context.CancelFunc
+
+ // The following fields are only accessed within the serializer or during
+ // initialization.
+ curBalancerName string
+ balancer *gracefulswitch.Balancer
+
+ // The following field is protected by mu. Caller must take cc.mu before
+ // taking mu.
+ mu sync.Mutex
+ closed bool
+}
+
+// newCCBalancerWrapper creates a new balancer wrapper in idle state. The
+// underlying balancer is not created until the updateClientConnState() method
+// is invoked.
+func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
+ ctx, cancel := context.WithCancel(cc.ctx)
+ ccb := &ccBalancerWrapper{
+ cc: cc,
+ opts: balancer.BuildOptions{
+ DialCreds: cc.dopts.copts.TransportCredentials,
+ CredsBundle: cc.dopts.copts.CredsBundle,
+ Dialer: cc.dopts.copts.Dialer,
+ Authority: cc.authority,
+ CustomUserAgent: cc.dopts.copts.UserAgent,
+ ChannelzParent: cc.channelz,
+ Target: cc.parsedTarget,
+ MetricsRecorder: cc.metricsRecorderList,
+ },
+ serializer: grpcsync.NewCallbackSerializer(ctx),
+ serializerCancel: cancel,
+ }
+ ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
+ return ccb
+}
+
+// updateClientConnState is invoked by grpc to push a ClientConnState update to
+// the underlying balancer. This is always executed from the serializer, so
+// it is safe to call into the balancer here.
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+ errCh := make(chan error)
+ uccs := func(ctx context.Context) {
+ defer close(errCh)
+ if ctx.Err() != nil || ccb.balancer == nil {
+ return
+ }
+ name := gracefulswitch.ChildName(ccs.BalancerConfig)
+ if ccb.curBalancerName != name {
+ ccb.curBalancerName = name
+ channelz.Infof(logger, ccb.cc.channelz, "Channel switches to new LB policy %q", name)
+ }
+ err := ccb.balancer.UpdateClientConnState(*ccs)
+ if logger.V(2) && err != nil {
+ logger.Infof("error from balancer.UpdateClientConnState: %v", err)
+ }
+ errCh <- err
+ }
+ onFailure := func() { close(errCh) }
+
+ // UpdateClientConnState can race with Close, and when the latter wins, the
+ // serializer is closed, and the attempt to schedule the callback will fail.
+ // It is acceptable to ignore this failure. But since we want to handle the
+ // state update in a blocking fashion (when we successfully schedule the
+// callback), we have to use the ScheduleOr method and not the TrySchedule
+ // method on the serializer.
+ ccb.serializer.ScheduleOr(uccs, onFailure)
+ return <-errCh
+}
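
The ScheduleOr call above guarantees that exactly one of the two callbacks runs, which is what makes the blocking receive safe. A hedged, generic sketch of that shape (trySchedule and doUpdate are hypothetical stand-ins, not the grpcsync API):

```go
errCh := make(chan error, 1)
ok := trySchedule(func() { errCh <- doUpdate() }) // runs in the serializer if it is still open
if !ok {
	close(errCh) // the "or" branch: serializer already closed, receive yields nil
}
err := <-errCh // never blocks forever: one of the two paths always fires
_ = err
```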
+
+// resolverError is invoked by grpc to push a resolver error to the underlying
+// balancer. The call to the balancer is executed from the serializer.
+func (ccb *ccBalancerWrapper) resolverError(err error) {
+ ccb.serializer.TrySchedule(func(ctx context.Context) {
+ if ctx.Err() != nil || ccb.balancer == nil {
+ return
+ }
+ ccb.balancer.ResolverError(err)
+ })
+}
+
+// close initiates async shutdown of the wrapper. cc.mu must be held when
+// calling this function. To determine whether the wrapper has finished shutting
+// down, the caller should block on ccb.serializer.Done() without cc.mu held.
+func (ccb *ccBalancerWrapper) close() {
+ ccb.mu.Lock()
+ ccb.closed = true
+ ccb.mu.Unlock()
+ channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
+ ccb.serializer.TrySchedule(func(context.Context) {
+ if ccb.balancer == nil {
+ return
+ }
+ ccb.balancer.Close()
+ ccb.balancer = nil
+ })
+ ccb.serializerCancel()
+}
+
+// exitIdle invokes the balancer's exitIdle method in the serializer.
+func (ccb *ccBalancerWrapper) exitIdle() {
+ ccb.serializer.TrySchedule(func(ctx context.Context) {
+ if ctx.Err() != nil || ccb.balancer == nil {
+ return
+ }
+ ccb.balancer.ExitIdle()
+ })
+}
+
+func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+ ccb.cc.mu.Lock()
+ defer ccb.cc.mu.Unlock()
+
+ ccb.mu.Lock()
+ if ccb.closed {
+ ccb.mu.Unlock()
+ return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed")
+ }
+ ccb.mu.Unlock()
+
+ if len(addrs) == 0 {
+ return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
+ }
+ ac, err := ccb.cc.newAddrConnLocked(addrs, opts)
+ if err != nil {
+ channelz.Warningf(logger, ccb.cc.channelz, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
+ return nil, err
+ }
+ acbw := &acBalancerWrapper{
+ ccb: ccb,
+ ac: ac,
+ producers: make(map[balancer.ProducerBuilder]*refCountedProducer),
+ stateListener: opts.StateListener,
+ }
+ ac.acbw = acbw
+ return acbw, nil
+}
+
+func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+ // The graceful switch balancer will never call this.
+ logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly", sc)
+}
+
+func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+ acbw, ok := sc.(*acBalancerWrapper)
+ if !ok {
+ return
+ }
+ acbw.UpdateAddresses(addrs)
+}
+
+func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
+ ccb.cc.mu.Lock()
+ defer ccb.cc.mu.Unlock()
+ if ccb.cc.conns == nil {
+ // The CC has been closed; ignore this update.
+ return
+ }
+
+ ccb.mu.Lock()
+ if ccb.closed {
+ ccb.mu.Unlock()
+ return
+ }
+ ccb.mu.Unlock()
+ // Update picker before updating state. Even though the ordering here does
+ // not matter, it can lead to multiple calls of Pick in the common start-up
+ // case where we wait for ready and then perform an RPC. If the picker is
+ // updated later, we could call the "connecting" picker when the state is
+ // updated, and then call the "ready" picker after the picker gets updated.
+
+ // Note that there is no need to check if the balancer wrapper was closed,
+ // as we know the graceful switch LB policy will not call cc if it has been
+ // closed.
+ ccb.cc.pickerWrapper.updatePicker(s.Picker)
+ ccb.cc.csMgr.updateState(s.ConnectivityState)
+}
+
+func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
+ ccb.cc.mu.RLock()
+ defer ccb.cc.mu.RUnlock()
+
+ ccb.mu.Lock()
+ if ccb.closed {
+ ccb.mu.Unlock()
+ return
+ }
+ ccb.mu.Unlock()
+ ccb.cc.resolveNowLocked(o)
+}
+
+func (ccb *ccBalancerWrapper) Target() string {
+ return ccb.cc.target
+}
+
+// acBalancerWrapper is a wrapper on top of ac for balancers.
+// It implements balancer.SubConn interface.
+type acBalancerWrapper struct {
+ ac *addrConn // read-only
+ ccb *ccBalancerWrapper // read-only
+ stateListener func(balancer.SubConnState)
+
+ producersMu sync.Mutex
+ producers map[balancer.ProducerBuilder]*refCountedProducer
+}
+
+// updateState is invoked by grpc to push a subConn state update to the
+// underlying balancer.
+func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) {
+ acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
+ if ctx.Err() != nil || acbw.ccb.balancer == nil {
+ return
+ }
+ // Invalidate all producers on any state change.
+ acbw.closeProducers()
+
+ // Even though it is optional for balancers, gracefulswitch ensures
+ // opts.StateListener is set, so this cannot ever be nil.
+ // TODO: delete this comment when UpdateSubConnState is removed.
+ scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err}
+ if s == connectivity.Ready {
+ setConnectedAddress(&scs, curAddr)
+ }
+ acbw.stateListener(scs)
+ })
+}
+
+func (acbw *acBalancerWrapper) String() string {
+ return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelz.ID)
+}
+
+func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
+ acbw.ac.updateAddrs(addrs)
+}
+
+func (acbw *acBalancerWrapper) Connect() {
+ go acbw.ac.connect()
+}
+
+func (acbw *acBalancerWrapper) Shutdown() {
+ acbw.closeProducers()
+ acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
+}
+
+// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
+// ready, blocks until it is or ctx expires. Returns an error when the context
+// expires or the addrConn is shut down.
+func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+ transport := acbw.ac.getReadyTransport()
+ if transport == nil {
+ return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready")
+ }
+ return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
+}
+
+// Invoke performs a unary RPC. If the addrConn is not ready, returns
+// errSubConnNotReady.
+func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error {
+ cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
+ if err != nil {
+ return err
+ }
+ if err := cs.SendMsg(args); err != nil {
+ return err
+ }
+ return cs.RecvMsg(reply)
+}
+
+type refCountedProducer struct {
+ producer balancer.Producer
+ refs int // number of current refs to the producer
+ close func() // underlying producer's close function
+}
+
+func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
+ acbw.producersMu.Lock()
+ defer acbw.producersMu.Unlock()
+
+ // Look up existing producer from this builder.
+ pData := acbw.producers[pb]
+ if pData == nil {
+ // Not found; create a new one and add it to the producers map.
+ p, closeFn := pb.Build(acbw)
+ pData = &refCountedProducer{producer: p, close: closeFn}
+ acbw.producers[pb] = pData
+ }
+ // Account for this new reference.
+ pData.refs++
+
+ // Return a cleanup function wrapped in a OnceFunc to remove this reference
+ // and delete the refCountedProducer from the map if the total reference
+ // count goes to zero.
+ unref := func() {
+ acbw.producersMu.Lock()
+ // If closeProducers has already closed this producer instance, refs is
+ // set to 0, so the check after decrementing will never pass, and the
+ // producer will not be double-closed.
+ pData.refs--
+ if pData.refs == 0 {
+ defer pData.close() // Run outside the acbw mutex
+ delete(acbw.producers, pb)
+ }
+ acbw.producersMu.Unlock()
+ }
+ return pData.producer, grpcsync.OnceFunc(unref)
+}
+
+func (acbw *acBalancerWrapper) closeProducers() {
+ acbw.producersMu.Lock()
+ defer acbw.producersMu.Unlock()
+ for pb, pData := range acbw.producers {
+ pData.refs = 0
+ pData.close()
+ delete(acbw.producers, pb)
+ }
+}
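
A hedged usage sketch from a producer builder's point of view (myBuilder and acbw are hypothetical here): repeated GetOrBuildProducer calls with the same builder share one Producer, and the underlying close only runs once the last reference is released or closeProducers tears everything down.

```go
p1, stop1 := acbw.GetOrBuildProducer(myBuilder)
p2, stop2 := acbw.GetOrBuildProducer(myBuilder)
// p1 == p2: the producer is shared and reference counted (refs == 2).
stop1()
stop2() // refs reach zero here, so the producer's close function runs exactly once
_, _ = p1, p2
```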
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index 595480112..55bffaa77 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,8 +18,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.31.0
-// protoc v4.22.0
+// protoc-gen-go v1.34.2
+// protoc v5.27.1
// source: grpc/binlog/v1/binarylog.proto
package grpc_binarylog_v1
@@ -430,7 +430,7 @@ type ClientHeader struct {
MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
// A single process may be used to run multiple virtual
// servers with different identities.
- // The authority is the name of such a server identitiy.
+ // The authority is the name of such a server identity.
// It is typically a portion of the URI in the form of
// or : .
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
@@ -1015,7 +1015,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte {
var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
-var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{
+var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{
(GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType
(GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger
(Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type
@@ -1058,7 +1058,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*GrpcLogEntry); i {
case 0:
return &v.state
@@ -1070,7 +1070,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ClientHeader); i {
case 0:
return &v.state
@@ -1082,7 +1082,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*ServerHeader); i {
case 0:
return &v.state
@@ -1094,7 +1094,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*Trailer); i {
case 0:
return &v.state
@@ -1106,7 +1106,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*Message); i {
case 0:
return &v.state
@@ -1118,7 +1118,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*Metadata); i {
case 0:
return &v.state
@@ -1130,7 +1130,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*MetadataEntry); i {
case 0:
return &v.state
@@ -1142,7 +1142,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*Address); i {
case 0:
return &v.state
@@ -1155,7 +1155,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
}
}
}
- file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{
(*GrpcLogEntry_ClientHeader)(nil),
(*GrpcLogEntry_ServerHeader)(nil),
(*GrpcLogEntry_Message)(nil),
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index ff7fea102..19763f8ed 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -24,6 +24,7 @@ import (
"fmt"
"math"
"net/url"
+ "slices"
"strings"
"sync"
"sync/atomic"
@@ -31,16 +32,15 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
+ "google.golang.org/grpc/balancer/pickfirst"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal"
- "google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/idle"
- "google.golang.org/grpc/internal/pretty"
iresolver "google.golang.org/grpc/internal/resolver"
+ "google.golang.org/grpc/internal/stats"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
@@ -48,9 +48,9 @@ import (
"google.golang.org/grpc/status"
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
- _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver.
_ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
_ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver.
+ _ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
)
const (
@@ -69,12 +69,14 @@ var (
errConnDrain = errors.New("grpc: the connection is drained")
// errConnClosing indicates that the connection is closing.
errConnClosing = errors.New("grpc: the connection is closing")
- // errConnIdling indicates the the connection is being closed as the channel
+ // errConnIdling indicates the connection is being closed as the channel
// is moving to an idle mode due to inactivity.
errConnIdling = errors.New("grpc: the connection is closing due to channel idleness")
// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
// service config.
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
+ // PickFirstBalancerName is the name of the pick_first balancer.
+ PickFirstBalancerName = pickfirst.Name
)
// The following errors are returned from Dial and DialContext
@@ -103,11 +105,6 @@ const (
defaultReadBufSize = 32 * 1024
)
-// Dial creates a client connection to the given target.
-func Dial(target string, opts ...DialOption) (*ClientConn, error) {
- return DialContext(context.Background(), target, opts...)
-}
-
type defaultConfigSelector struct {
sc *ServiceConfig
}
@@ -119,47 +116,30 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires
}, nil
}
-// DialContext creates a client connection to the given target. By default, it's
-// a non-blocking dial (the function won't wait for connections to be
-// established, and connecting happens in the background). To make it a blocking
-// dial, use WithBlock() dial option.
-//
-// In the non-blocking case, the ctx does not act against the connection. It
-// only controls the setup steps.
-//
-// In the blocking case, ctx can be used to cancel or expire the pending
-// connection. Once this function returns, the cancellation and expiration of
-// ctx will be noop. Users should call ClientConn.Close to terminate all the
-// pending operations after this function returns.
+// NewClient creates a new gRPC "channel" for the target URI provided. No I/O
+// is performed. Use of the ClientConn for RPCs will automatically cause it to
+// connect. Connect may be used to manually create a connection, but for most
+// users this is unnecessary.
//
// The target name syntax is defined in
-// https://github.com/grpc/grpc/blob/master/doc/naming.md.
-// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
-func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
+// https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use dns
+// resolver, a "dns:///" prefix should be applied to the target.
+//
+// The DialOptions returned by WithBlock, WithTimeout,
+// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this
+// function.
+func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) {
cc := &ClientConn{
target: target,
conns: make(map[*addrConn]struct{}),
dopts: defaultDialOptions(),
- czData: new(channelzData),
}
- // We start the channel off in idle mode, but kick it out of idle at the end
- // of this method, instead of waiting for the first RPC. Other gRPC
- // implementations do wait for the first RPC to kick the channel out of
- // idle. But doing so would be a major behavior change for our users who are
- // used to seeing the channel active after Dial.
- //
- // Taking this approach of kicking it out of idle at the end of this method
- // allows us to share the code between channel creation and exiting idle
- // mode. This will also make it easy for us to switch to starting the
- // channel off in idle, if at all we ever get to do that.
- cc.idlenessState = ccIdlenessStateIdle
-
cc.retryThrottler.Store((*retryThrottler)(nil))
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
cc.ctx, cc.cancel = context.WithCancel(context.Background())
- cc.exitIdleCond = sync.NewCond(&cc.mu)
+ // Apply dial options.
disableGlobalOpts := false
for _, opt := range opts {
if _, ok := opt.(*disableGlobalDialOptions); ok {
@@ -178,26 +158,24 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
opt.apply(&cc.dopts)
}
- chainUnaryClientInterceptors(cc)
- chainStreamClientInterceptors(cc)
-
- defer func() {
- if err != nil {
- cc.Close()
- }
- }()
+ // Determine the resolver to use.
+ if err := cc.initParsedTargetAndResolverBuilder(); err != nil {
+ return nil, err
+ }
- // Register ClientConn with channelz.
- cc.channelzRegistration(target)
+ for _, opt := range globalPerTargetDialOptions {
+ opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts)
+ }
- cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID)
+ chainUnaryClientInterceptors(cc)
+ chainStreamClientInterceptors(cc)
if err := cc.validateTransportCredentials(); err != nil {
return nil, err
}
if cc.dopts.defaultServiceConfigRawJSON != nil {
- scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
+ scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts)
if scpr.Err != nil {
return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
}
@@ -205,10 +183,71 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
cc.mkp = cc.dopts.copts.KeepaliveParams
- if cc.dopts.copts.UserAgent != "" {
- cc.dopts.copts.UserAgent += " " + grpcUA
- } else {
- cc.dopts.copts.UserAgent = grpcUA
+ if err = cc.initAuthority(); err != nil {
+ return nil, err
+ }
+
+ // Register ClientConn with channelz. Note that this is only done after
+ // channel creation cannot fail.
+ cc.channelzRegistration(target)
+ channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget)
+ channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority)
+
+ cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
+ cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers)
+
+ cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+
+ cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
+ cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
+
+ return cc, nil
+}
+
+// Dial calls DialContext(context.Background(), target, opts...).
+//
+// Deprecated: use NewClient instead. Will be supported throughout 1.x.
+func Dial(target string, opts ...DialOption) (*ClientConn, error) {
+ return DialContext(context.Background(), target, opts...)
+}
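For readers tracking this vendor bump, a minimal sketch of the caller-side migration the new API enables (target and credentials are placeholders): NewClient builds the channel without performing any I/O, while Dial and DialContext remain only as deprecated wrappers.

    package main

    import (
    	"log"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"
    )

    func main() {
    	// NewClient performs no I/O; the channel connects lazily on the first RPC.
    	conn, err := grpc.NewClient("dns:///example.com:443",
    		grpc.WithTransportCredentials(insecure.NewCredentials()))
    	if err != nil {
    		log.Fatalf("creating channel: %v", err)
    	}
    	defer conn.Close()

    	// Optional: start connecting now instead of waiting for the first RPC.
    	conn.Connect()
    }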
+
+// DialContext calls NewClient and then exits idle mode. If WithBlock(true) is
+// used, it calls Connect and WaitForStateChange until either the context
+// expires or the state of the ClientConn is Ready.
+//
+// One subtle difference between NewClient and Dial and DialContext is that the
+// former uses "dns" as the default name resolver, while the latter use
+// "passthrough" for backward compatibility. This distinction should not matter
+// to most users, but could matter to legacy users that specify a custom dialer
+// and expect it to receive the target string directly.
+//
+// Deprecated: use NewClient instead. Will be supported throughout 1.x.
+func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
+ // At the end of this method, we kick the channel out of idle, rather than
+ // waiting for the first rpc.
+ opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...)
+ cc, err := NewClient(target, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ // We start the channel off in idle mode, but kick it out of idle now,
+ // instead of waiting for the first RPC. This is the legacy behavior of
+ // Dial.
+ defer func() {
+ if err != nil {
+ cc.Close()
+ }
+ }()
+
+ // This creates the name resolver, load balancer, etc.
+ if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
+ return nil, err
+ }
+
+ // Return now for non-blocking dials.
+ if !cc.dopts.block {
+ return cc, nil
}
if cc.dopts.timeout > 0 {
@@ -231,49 +270,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
}
}()
- if cc.dopts.bs == nil {
- cc.dopts.bs = backoff.DefaultExponential
- }
-
- // Determine the resolver to use.
- if err := cc.parseTargetAndFindResolver(); err != nil {
- return nil, err
- }
- if err = cc.determineAuthority(); err != nil {
- return nil, err
- }
-
- if cc.dopts.scChan != nil {
- // Blocking wait for the initial service config.
- select {
- case sc, ok := <-cc.dopts.scChan:
- if ok {
- cc.sc = &sc
- cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
- }
- case <-ctx.Done():
- return nil, ctx.Err()
- }
- }
- if cc.dopts.scChan != nil {
- go cc.scWatcher()
- }
-
- // This creates the name resolver, load balancer, blocking picker etc.
- if err := cc.exitIdleMode(); err != nil {
- return nil, err
- }
-
- // Configure idleness support with configured idle timeout or default idle
- // timeout duration. Idleness can be explicitly disabled by the user, by
- // setting the dial option to 0.
- cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger})
-
- // Return early for non-blocking dials.
- if !cc.dopts.block {
- return cc, nil
- }
-
// A blocking dial blocks until the clientConn is ready.
for {
s := cc.GetState()
@@ -305,23 +301,23 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
// addTraceEvent is a helper method to add a trace event on the channel. If the
// channel is a nested one, the same event is also added on the parent channel.
func (cc *ClientConn) addTraceEvent(msg string) {
- ted := &channelz.TraceEventDesc{
+ ted := &channelz.TraceEvent{
Desc: fmt.Sprintf("Channel %s", msg),
Severity: channelz.CtInfo,
}
- if cc.dopts.channelzParentID != nil {
- ted.Parent = &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg),
+ if cc.dopts.channelzParent != nil {
+ ted.Parent = &channelz.TraceEvent{
+ Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelz.ID, msg),
Severity: channelz.CtInfo,
}
}
- channelz.AddTraceEvent(logger, cc.channelzID, 0, ted)
+ channelz.AddTraceEvent(logger, cc.channelz, 0, ted)
}
type idler ClientConn
-func (i *idler) EnterIdleMode() error {
- return (*ClientConn)(i).enterIdleMode()
+func (i *idler) EnterIdleMode() {
+ (*ClientConn)(i).enterIdleMode()
}
func (i *idler) ExitIdleMode() error {
@@ -329,117 +325,71 @@ func (i *idler) ExitIdleMode() error {
}
// exitIdleMode moves the channel out of idle mode by recreating the name
-// resolver and load balancer.
-func (cc *ClientConn) exitIdleMode() error {
+// resolver and load balancer. This should never be called directly; use
+// cc.idlenessMgr.ExitIdleMode instead.
+func (cc *ClientConn) exitIdleMode() (err error) {
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
return errConnClosing
}
- if cc.idlenessState != ccIdlenessStateIdle {
- cc.mu.Unlock()
- channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState)
- return nil
- }
-
- defer func() {
- // When Close() and exitIdleMode() race against each other, one of the
- // following two can happen:
- // - Close() wins the race and runs first. exitIdleMode() runs after, and
- // sees that the ClientConn is already closed and hence returns early.
- // - exitIdleMode() wins the race and runs first and recreates the balancer
- // and releases the lock before recreating the resolver. If Close() runs
- // in this window, it will wait for exitIdleMode to complete.
- //
- // We achieve this synchronization using the below condition variable.
- cc.mu.Lock()
- cc.idlenessState = ccIdlenessStateActive
- cc.exitIdleCond.Signal()
- cc.mu.Unlock()
- }()
-
- cc.idlenessState = ccIdlenessStateExitingIdle
- exitedIdle := false
- if cc.blockingpicker == nil {
- cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers)
- } else {
- cc.blockingpicker.exitIdleMode()
- exitedIdle = true
- }
-
- var credsClone credentials.TransportCredentials
- if creds := cc.dopts.copts.TransportCredentials; creds != nil {
- credsClone = creds.Clone()
- }
- if cc.balancerWrapper == nil {
- cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{
- DialCreds: credsClone,
- CredsBundle: cc.dopts.copts.CredsBundle,
- Dialer: cc.dopts.copts.Dialer,
- Authority: cc.authority,
- CustomUserAgent: cc.dopts.copts.UserAgent,
- ChannelzParentID: cc.channelzID,
- Target: cc.parsedTarget,
- })
- } else {
- cc.balancerWrapper.exitIdleMode()
- }
- cc.firstResolveEvent = grpcsync.NewEvent()
cc.mu.Unlock()
// This needs to be called without cc.mu because this builds a new resolver
- // which might update state or report error inline which needs to be handled
- // by cc.updateResolverState() which also grabs cc.mu.
- if err := cc.initResolverWrapper(credsClone); err != nil {
+ // which might update state or report error inline, which would then need to
+ // acquire cc.mu.
+ if err := cc.resolverWrapper.start(); err != nil {
return err
}
- if exitedIdle {
- cc.addTraceEvent("exiting idle mode")
- }
+ cc.addTraceEvent("exiting idle mode")
return nil
}
+// initIdleStateLocked initializes common state to how it should be while idle.
+func (cc *ClientConn) initIdleStateLocked() {
+ cc.resolverWrapper = newCCResolverWrapper(cc)
+ cc.balancerWrapper = newCCBalancerWrapper(cc)
+ cc.firstResolveEvent = grpcsync.NewEvent()
+ // cc.conns == nil is a proxy for the ClientConn being closed. So, instead
+ // of setting it to nil here, we recreate the map. This also means that we
+ // don't have to do this when exiting idle mode.
+ cc.conns = make(map[*addrConn]struct{})
+}
+
// enterIdleMode puts the channel in idle mode, and as part of it shuts down the
-// name resolver, load balancer and any subchannels.
-func (cc *ClientConn) enterIdleMode() error {
+// name resolver, load balancer, and any subchannels. This should never be
+// called directly; use cc.idlenessMgr.EnterIdleMode instead.
+func (cc *ClientConn) enterIdleMode() {
cc.mu.Lock()
+
if cc.conns == nil {
cc.mu.Unlock()
- return ErrClientConnClosing
- }
- if cc.idlenessState != ccIdlenessStateActive {
- channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState)
- cc.mu.Unlock()
- return nil
+ return
}
- // cc.conns == nil is a proxy for the ClientConn being closed. So, instead
- // of setting it to nil here, we recreate the map. This also means that we
- // don't have to do this when exiting idle mode.
conns := cc.conns
- cc.conns = make(map[*addrConn]struct{})
- // TODO: Currently, we close the resolver wrapper upon entering idle mode
- // and create a new one upon exiting idle mode. This means that the
- // `cc.resolverWrapper` field would be overwritten everytime we exit idle
- // mode. While this means that we need to hold `cc.mu` when accessing
- // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should
- // try to do the same for the balancer and picker wrappers too.
- cc.resolverWrapper.close()
- cc.blockingpicker.enterIdleMode()
- cc.balancerWrapper.enterIdleMode()
+ rWrapper := cc.resolverWrapper
+ rWrapper.close()
+ cc.pickerWrapper.reset()
+ bWrapper := cc.balancerWrapper
+ bWrapper.close()
cc.csMgr.updateState(connectivity.Idle)
- cc.idlenessState = ccIdlenessStateIdle
+ cc.addTraceEvent("entering idle mode")
+
+ cc.initIdleStateLocked()
+
cc.mu.Unlock()
- go func() {
- cc.addTraceEvent("entering idle mode")
- for ac := range conns {
- ac.tearDown(errConnIdling)
- }
- }()
- return nil
+ // Block until the name resolver and LB policy are closed.
+ <-rWrapper.serializer.Done()
+ <-bWrapper.serializer.Done()
+
+ // Close all subchannels after the LB policy is closed.
+ for ac := range conns {
+ ac.tearDown(errConnIdling)
+ }
}
// validateTransportCredentials performs a series of checks on the configured
@@ -478,14 +428,15 @@ func (cc *ClientConn) validateTransportCredentials() error {
}
// channelzRegistration registers the newly created ClientConn with channelz and
-// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`.
-// A channelz trace event is emitted for ClientConn creation. If the newly
-// created ClientConn is a nested one, i.e a valid parent ClientConn ID is
-// specified via a dial option, the trace event is also added to the parent.
+// stores the returned identifier in `cc.channelz`. A channelz trace event is
+// emitted for ClientConn creation. If the newly created ClientConn is a nested
+// one, i.e a valid parent ClientConn ID is specified via a dial option, the
+// trace event is also added to the parent.
//
// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
func (cc *ClientConn) channelzRegistration(target string) {
- cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
+ parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel)
+ cc.channelz = channelz.RegisterChannel(parentChannel, target)
cc.addTraceEvent("created")
}
@@ -552,11 +503,11 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr
}
// newConnectivityStateManager creates an connectivityStateManager with
-// the specified id.
-func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager {
+// the specified channel.
+func newConnectivityStateManager(ctx context.Context, channel *channelz.Channel) *connectivityStateManager {
return &connectivityStateManager{
- channelzID: id,
- pubSub: grpcsync.NewPubSub(ctx),
+ channelz: channel,
+ pubSub: grpcsync.NewPubSub(ctx),
}
}
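The connectivityStateManager keeps its existing close-and-recreate notification channel; only the channelz plumbing changes around it. For reference, a minimal sketch of that broadcast pattern with hypothetical types (not the grpc internals):

    package main

    import (
    	"fmt"
    	"sync"
    )

    // stateManager broadcasts state changes by closing a channel that waiters
    // hold, then recreating it for the next change.
    type stateManager struct {
    	mu     sync.Mutex
    	state  string
    	notify chan struct{}
    }

    func newStateManager() *stateManager {
    	return &stateManager{state: "idle", notify: make(chan struct{})}
    }

    func (m *stateManager) set(s string) {
    	m.mu.Lock()
    	defer m.mu.Unlock()
    	if s == m.state {
    		return
    	}
    	m.state = s
    	close(m.notify)                // wake every current waiter
    	m.notify = make(chan struct{}) // fresh channel for future waiters
    }

    func (m *stateManager) wait() <-chan struct{} {
    	m.mu.Lock()
    	defer m.mu.Unlock()
    	return m.notify
    }

    func main() {
    	m := newStateManager()
    	ch := m.wait()
    	go m.set("connecting")
    	<-ch
    	fmt.Println("observed a state change")
    }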
@@ -570,7 +521,7 @@ type connectivityStateManager struct {
mu sync.Mutex
state connectivity.State
notifyChan chan struct{}
- channelzID *channelz.Identifier
+ channelz *channelz.Channel
pubSub *grpcsync.PubSub
}
@@ -587,9 +538,10 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) {
return
}
csm.state = state
+ csm.channelz.ChannelMetrics.State.Store(&state)
csm.pubSub.Publish(state)
- channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state)
+ channelz.Infof(logger, csm.channelz, "Channel Connectivity change to %v", state)
if csm.notifyChan != nil {
// There are other goroutines waiting on this channel.
close(csm.notifyChan)
@@ -643,79 +595,43 @@ type ClientConn struct {
cancel context.CancelFunc // Cancelled on close.
// The following are initialized at dial time, and are read-only after that.
- target string // User's dial target.
- parsedTarget resolver.Target // See parseTargetAndFindResolver().
- authority string // See determineAuthority().
- dopts dialOptions // Default and user specified dial options.
- channelzID *channelz.Identifier // Channelz identifier for the channel.
- resolverBuilder resolver.Builder // See parseTargetAndFindResolver().
- balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath.
- idlenessMgr idle.Manager
+ target string // User's dial target.
+ parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder().
+ authority string // See initAuthority().
+ dopts dialOptions // Default and user specified dial options.
+ channelz *channelz.Channel // Channelz object.
+ resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
+ idlenessMgr *idle.Manager
+ metricsRecorderList *stats.MetricsRecorderList
// The following provide their own synchronization, and therefore don't
// require cc.mu to be held to access them.
csMgr *connectivityStateManager
- blockingpicker *pickerWrapper
+ pickerWrapper *pickerWrapper
safeConfigSelector iresolver.SafeConfigSelector
- czData *channelzData
retryThrottler atomic.Value // Updated from service config.
- // firstResolveEvent is used to track whether the name resolver sent us at
- // least one update. RPCs block on this event.
- firstResolveEvent *grpcsync.Event
-
// mu protects the following fields.
// TODO: split mu so the same mutex isn't used for everything.
mu sync.RWMutex
- resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close.
+ resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close.
+ balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close.
sc *ServiceConfig // Latest service config received from the resolver.
conns map[*addrConn]struct{} // Set to nil on close.
mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway.
- idlenessState ccIdlenessState // Tracks idleness state of the channel.
- exitIdleCond *sync.Cond // Signalled when channel exits idle.
+ // firstResolveEvent is used to track whether the name resolver sent us at
+ // least one update. RPCs block on this event. May be accessed without mu
+ // if we know we cannot be asked to enter idle mode while accessing it (e.g.
+ // when the idle manager has already been closed, or if we are already
+ // entering idle mode).
+ firstResolveEvent *grpcsync.Event
lceMu sync.Mutex // protects lastConnectionError
lastConnectionError error
}
-// ccIdlenessState tracks the idleness state of the channel.
-//
-// Channels start off in `active` and move to `idle` after a period of
-// inactivity. When moving back to `active` upon an incoming RPC, they
-// transition through `exiting_idle`. This state is useful for synchronization
-// with Close().
-//
-// This state tracking is mostly for self-protection. The idlenessManager is
-// expected to keep track of the state as well, and is expected not to call into
-// the ClientConn unnecessarily.
-type ccIdlenessState int8
-
-const (
- ccIdlenessStateActive ccIdlenessState = iota
- ccIdlenessStateIdle
- ccIdlenessStateExitingIdle
-)
-
-func (s ccIdlenessState) String() string {
- switch s {
- case ccIdlenessStateActive:
- return "active"
- case ccIdlenessStateIdle:
- return "idle"
- case ccIdlenessStateExitingIdle:
- return "exitingIdle"
- default:
- return "unknown"
- }
-}
-
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in former case and false in latter.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
ch := cc.csMgr.getNotifyChan()
if cc.csMgr.getState() != sourceState {
@@ -730,11 +646,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec
}
// GetState returns the connectivity.State of ClientConn.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
-// release.
func (cc *ClientConn) GetState() connectivity.State {
return cc.csMgr.getState()
}
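With the Experimental notices dropped, GetState and WaitForStateChange are regular ClientConn APIs. A hedged sketch of the usual readiness-wait loop built on them (target and timeout are placeholders):

    package main

    import (
    	"context"
    	"log"
    	"time"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/connectivity"
    	"google.golang.org/grpc/credentials/insecure"
    )

    func main() {
    	conn, err := grpc.NewClient("dns:///example.com:443",
    		grpc.WithTransportCredentials(insecure.NewCredentials()))
    	if err != nil {
    		log.Fatalf("creating channel: %v", err)
    	}
    	defer conn.Close()

    	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    	defer cancel()

    	conn.Connect() // start connecting; NewClient alone performs no I/O
    	for state := conn.GetState(); state != connectivity.Ready; state = conn.GetState() {
    		// WaitForStateChange returns false once ctx expires.
    		if !conn.WaitForStateChange(ctx, state) {
    			log.Fatalf("channel never became ready: %v", ctx.Err())
    		}
    	}
    	log.Println("channel is ready")
    }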
@@ -748,29 +659,15 @@ func (cc *ClientConn) GetState() connectivity.State {
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.
func (cc *ClientConn) Connect() {
- cc.exitIdleMode()
+ if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
+ cc.addTraceEvent(err.Error())
+ return
+ }
// If the ClientConn was not in idle mode, we need to call ExitIdle on the
// LB policy so that connections can be created.
- cc.balancerWrapper.exitIdleMode()
-}
-
-func (cc *ClientConn) scWatcher() {
- for {
- select {
- case sc, ok := <-cc.dopts.scChan:
- if !ok {
- return
- }
- cc.mu.Lock()
- // TODO: load balance policy runtime change is ignored.
- // We may revisit this decision in the future.
- cc.sc = &sc
- cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc})
- cc.mu.Unlock()
- case <-cc.ctx.Done():
- return
- }
- }
+ cc.mu.Lock()
+ cc.balancerWrapper.exitIdle()
+ cc.mu.Unlock()
}
// waitForResolvedAddrs blocks until the resolver has provided addresses or the
@@ -795,7 +692,7 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
var emptyServiceConfig *ServiceConfig
func init() {
- cfg := parseServiceConfig("{}")
+ cfg := parseServiceConfig("{}", defaultMaxCallAttempts)
if cfg.Err != nil {
panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
}
@@ -804,23 +701,28 @@ func init() {
internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() {
return cc.csMgr.pubSub.Subscribe(s)
}
+ internal.EnterIdleModeForTesting = func(cc *ClientConn) {
+ cc.idlenessMgr.EnterIdleModeForTesting()
+ }
+ internal.ExitIdleModeForTesting = func(cc *ClientConn) error {
+ return cc.idlenessMgr.ExitIdleMode()
+ }
}
-func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
+func (cc *ClientConn) maybeApplyDefaultServiceConfig() {
if cc.sc != nil {
- cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs)
+ cc.applyServiceConfigAndBalancer(cc.sc, nil)
return
}
if cc.dopts.defaultServiceConfig != nil {
- cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs)
+ cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig})
} else {
- cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs)
+ cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig})
}
}
-func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
+func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error {
defer cc.firstResolveEvent.Fire()
- cc.mu.Lock()
// Check if the ClientConn is already closed. Some fields (e.g.
// balancerWrapper) are set to nil when closing the ClientConn, and could
// cause nil pointer panic if we don't have this check.
@@ -833,7 +735,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
// May need to apply the initial service config in case the resolver
// doesn't support service configs, or doesn't provide a service config
// with the new addresses.
- cc.maybeApplyDefaultServiceConfig(nil)
+ cc.maybeApplyDefaultServiceConfig()
cc.balancerWrapper.resolverError(err)
@@ -844,10 +746,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
var ret error
if cc.dopts.disableServiceConfig {
- channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig)
- cc.maybeApplyDefaultServiceConfig(s.Addresses)
+ channelz.Infof(logger, cc.channelz, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig)
+ cc.maybeApplyDefaultServiceConfig()
} else if s.ServiceConfig == nil {
- cc.maybeApplyDefaultServiceConfig(s.Addresses)
+ cc.maybeApplyDefaultServiceConfig()
// TODO: do we need to apply a failing LB policy if there is no
// default, per the error handling design?
} else {
@@ -855,18 +757,18 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
configSelector := iresolver.GetConfigSelector(s)
if configSelector != nil {
if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 {
- channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector")
+ channelz.Infof(logger, cc.channelz, "method configs in service config will be ignored due to presence of config selector")
}
} else {
configSelector = &defaultConfigSelector{sc}
}
- cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses)
+ cc.applyServiceConfigAndBalancer(sc, configSelector)
} else {
ret = balancer.ErrBadResolverState
if cc.sc == nil {
// Apply the failing LB only if we haven't received valid service config
// from the name resolver in the past.
- cc.applyFailingLB(s.ServiceConfig)
+ cc.applyFailingLBLocked(s.ServiceConfig)
cc.mu.Unlock()
return ret
}
@@ -875,7 +777,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
var balCfg serviceconfig.LoadBalancingConfig
if cc.sc != nil && cc.sc.lbConfig != nil {
- balCfg = cc.sc.lbConfig.cfg
+ balCfg = cc.sc.lbConfig
}
bw := cc.balancerWrapper
cc.mu.Unlock()
@@ -888,15 +790,13 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
return ret
}
-// applyFailingLB is akin to configuring an LB policy on the channel which
+// applyFailingLBLocked is akin to configuring an LB policy on the channel which
// always fails RPCs. Here, an actual LB policy is not configured, but an always
// erroring picker is configured, which returns errors with information about
// what was invalid in the received service config. A config selector with no
// service config is configured, and the connectivity state of the channel is
// set to TransientFailure.
-//
-// Caller must hold cc.mu.
-func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) {
+func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
var err error
if sc.Err != nil {
err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err)
@@ -904,64 +804,50 @@ func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) {
err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config)
}
cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
- cc.blockingpicker.updatePicker(base.NewErrPicker(err))
+ cc.pickerWrapper.updatePicker(base.NewErrPicker(err))
cc.csMgr.updateState(connectivity.TransientFailure)
}
-func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
- cc.balancerWrapper.updateSubConnState(sc, s, err)
-}
-
-// Makes a copy of the input addresses slice and clears out the balancer
-// attributes field. Addresses are passed during subconn creation and address
-// update operations. In both cases, we will clear the balancer attributes by
-// calling this function, and therefore we will be able to use the Equal method
-// provided by the resolver.Address type for comparison.
-func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address {
+// Makes a copy of the input addresses slice. Addresses are passed during
+// subconn creation and address update operations.
+func copyAddresses(in []resolver.Address) []resolver.Address {
out := make([]resolver.Address, len(in))
- for i := range in {
- out[i] = in[i]
- out[i].BalancerAttributes = nil
- }
+ copy(out, in)
return out
}
-// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
+// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns.
//
// Caller needs to make sure len(addrs) > 0.
-func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
+func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
+ if cc.conns == nil {
+ return nil, ErrClientConnClosing
+ }
+
ac := &addrConn{
state: connectivity.Idle,
cc: cc,
- addrs: copyAddressesWithoutBalancerAttributes(addrs),
+ addrs: copyAddresses(addrs),
scopts: opts,
dopts: cc.dopts,
- czData: new(channelzData),
+ channelz: channelz.RegisterSubChannel(cc.channelz, ""),
resetBackoff: make(chan struct{}),
- stateChan: make(chan struct{}),
}
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
- // Track ac in cc. This needs to be done before any getTransport(...) is called.
- cc.mu.Lock()
- defer cc.mu.Unlock()
- if cc.conns == nil {
- return nil, ErrClientConnClosing
- }
+ // Start with our address set to the first address; this may be updated if
+ // we connect to different addresses.
+ ac.channelz.ChannelMetrics.Target.Store(&addrs[0].Addr)
- var err error
- ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "")
- if err != nil {
- return nil, err
- }
- channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
+ channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{
Desc: "Subchannel created",
Severity: channelz.CtInfo,
- Parent: &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()),
+ Parent: &channelz.TraceEvent{
+ Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelz.ID),
Severity: channelz.CtInfo,
},
})
+ // Track ac in cc. This needs to be done before any getTransport(...) is called.
cc.conns[ac] = struct{}{}
return ac, nil
}
@@ -979,38 +865,27 @@ func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
ac.tearDown(err)
}
-func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
- return &channelz.ChannelInternalMetric{
- State: cc.GetState(),
- Target: cc.target,
- CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted),
- CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded),
- CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed),
- LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)),
- }
-}
-
// Target returns the target string of the ClientConn.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
func (cc *ClientConn) Target() string {
return cc.target
}
+// CanonicalTarget returns the canonical target string of the ClientConn.
+func (cc *ClientConn) CanonicalTarget() string {
+ return cc.parsedTarget.String()
+}
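CanonicalTarget is new public surface alongside Target: the former returns the parsed, scheme-qualified form, while the latter echoes back whatever string the caller dialed. A small sketch; the output comments show what I would expect after default-scheme handling, not verified here:

    package main

    import (
    	"fmt"
    	"log"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"
    )

    func main() {
    	conn, err := grpc.NewClient("example.com:443",
    		grpc.WithTransportCredentials(insecure.NewCredentials()))
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer conn.Close()

    	fmt.Println(conn.Target())          // "example.com:443" (as dialed)
    	fmt.Println(conn.CanonicalTarget()) // e.g. "dns:///example.com:443" after scheme defaulting
    }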
+
func (cc *ClientConn) incrCallsStarted() {
- atomic.AddInt64(&cc.czData.callsStarted, 1)
- atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano())
+ cc.channelz.ChannelMetrics.CallsStarted.Add(1)
+ cc.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
}
func (cc *ClientConn) incrCallsSucceeded() {
- atomic.AddInt64(&cc.czData.callsSucceeded, 1)
+ cc.channelz.ChannelMetrics.CallsSucceeded.Add(1)
}
func (cc *ClientConn) incrCallsFailed() {
- atomic.AddInt64(&cc.czData.callsFailed, 1)
+ cc.channelz.ChannelMetrics.CallsFailed.Add(1)
}
// connect starts creating a transport.
@@ -1032,32 +907,37 @@ func (ac *addrConn) connect() error {
ac.mu.Unlock()
return nil
}
- ac.mu.Unlock()
- ac.resetTransport()
+ ac.resetTransportAndUnlock()
return nil
}
-func equalAddresses(a, b []resolver.Address) bool {
- if len(a) != len(b) {
- return false
- }
- for i, v := range a {
- if !v.Equal(b[i]) {
- return false
- }
- }
- return true
+// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.

+// This is different from the Equal method on the resolver.Address type which
+// considers all fields to determine equality. Here, we only consider fields
+// that are meaningful to the subConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+ return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+ a.Attributes.Equal(b.Attributes) &&
+ a.Metadata == b.Metadata
+}
+
+func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool {
+ return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) })
}
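equalAddressesIgnoringBalAttributes leans on slices.EqualFunc from the standard library (Go 1.21+), which compares two slices element-wise with a caller-supplied predicate. A standalone illustration using a simple struct rather than resolver.Address:

    package main

    import (
    	"fmt"
    	"slices"
    	"strings"
    )

    type endpoint struct {
    	Addr string
    	Note string // ignored for equality, like balancer attributes above
    }

    func main() {
    	a := []endpoint{{Addr: "10.0.0.1:443", Note: "primary"}}
    	b := []endpoint{{Addr: "10.0.0.1:443", Note: "backup"}}

    	equal := slices.EqualFunc(a, b, func(x, y endpoint) bool {
    		return strings.EqualFold(x.Addr, y.Addr)
    	})
    	fmt.Println(equal) // true: lengths match and every pair satisfies the predicate
    }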
// updateAddrs updates ac.addrs with the new addresses list and handles active
// connections or connection attempts.
func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
- ac.mu.Lock()
- channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs))
+ addrs = copyAddresses(addrs)
+ limit := len(addrs)
+ if limit > 5 {
+ limit = 5
+ }
+ channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
- addrs = copyAddressesWithoutBalancerAttributes(addrs)
- if equalAddresses(ac.addrs, addrs) {
+ ac.mu.Lock()
+ if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) {
ac.mu.Unlock()
return
}
@@ -1076,7 +956,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
// Try to find the connected address.
for _, a := range addrs {
a.ServerName = ac.cc.getServerName(a)
- if a.Equal(ac.curAddr) {
+ if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) {
// We are connected to a valid address, so do nothing but
// update the addresses.
ac.mu.Unlock()
@@ -1102,11 +982,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
ac.updateConnectivityState(connectivity.Idle, nil)
}
- ac.mu.Unlock()
-
// Since we were connecting/connected, we should start a new connection
// attempt.
- go ac.resetTransport()
+ go ac.resetTransportAndUnlock()
}
// getServerName determines the serverName to be used in the connection
@@ -1168,13 +1046,13 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
}
func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) {
- return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{
+ return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{
Ctx: ctx,
FullMethodName: method,
})
}
-func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) {
+func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) {
if sc == nil {
// should never reach here.
return
@@ -1195,27 +1073,16 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel
} else {
cc.retryThrottler.Store((*retryThrottler)(nil))
}
-
- var newBalancerName string
- if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) {
- // No service config or no LB policy specified in config.
- newBalancerName = PickFirstBalancerName
- } else if cc.sc.lbConfig != nil {
- newBalancerName = cc.sc.lbConfig.name
- } else { // cc.sc.LB != nil
- newBalancerName = *cc.sc.LB
- }
- cc.balancerWrapper.switchTo(newBalancerName)
}
func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
cc.mu.RLock()
- r := cc.resolverWrapper
+ cc.resolverWrapper.resolveNow(o)
cc.mu.RUnlock()
- if r == nil {
- return
- }
- go r.resolveNow(o)
+}
+
+func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) {
+ cc.resolverWrapper.resolveNow(o)
}
// ResetConnectBackoff wakes up all subchannels in transient failure and causes
@@ -1247,49 +1114,46 @@ func (cc *ClientConn) Close() error {
<-cc.csMgr.pubSub.Done()
}()
+ // Prevent calls to enter/exit idle immediately, and ensure we are not
+ // currently entering/exiting idle mode.
+ cc.idlenessMgr.Close()
+
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
return ErrClientConnClosing
}
- for cc.idlenessState == ccIdlenessStateExitingIdle {
- cc.exitIdleCond.Wait()
- }
-
conns := cc.conns
cc.conns = nil
cc.csMgr.updateState(connectivity.Shutdown)
- pWrapper := cc.blockingpicker
- rWrapper := cc.resolverWrapper
- bWrapper := cc.balancerWrapper
- idlenessMgr := cc.idlenessMgr
+ // We can safely unlock and continue to access all fields now as
+ // cc.conns==nil, preventing any further operations on cc.
cc.mu.Unlock()
+ cc.resolverWrapper.close()
// The order of closing matters here since the balancer wrapper assumes the
// picker is closed before it is closed.
- if pWrapper != nil {
- pWrapper.close()
- }
- if bWrapper != nil {
- bWrapper.close()
- }
- if rWrapper != nil {
- rWrapper.close()
- }
- if idlenessMgr != nil {
- idlenessMgr.Close()
- }
+ cc.pickerWrapper.close()
+ cc.balancerWrapper.close()
+ <-cc.resolverWrapper.serializer.Done()
+ <-cc.balancerWrapper.serializer.Done()
+ var wg sync.WaitGroup
for ac := range conns {
- ac.tearDown(ErrClientConnClosing)
+ wg.Add(1)
+ go func(ac *addrConn) {
+ defer wg.Done()
+ ac.tearDown(ErrClientConnClosing)
+ }(ac)
}
+ wg.Wait()
cc.addTraceEvent("deleted")
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
// trace reference to the entity being deleted, and thus prevent it from being
// deleted right away.
- channelz.RemoveEntry(cc.channelzID)
+ channelz.RemoveEntry(cc.channelz.ID)
return nil
}
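Close now tears the subchannels down in parallel and waits for all of them, instead of iterating serially. The shape of that fan-out is the standard sync.WaitGroup pattern; a self-contained sketch with a hypothetical teardown function:

    package main

    import (
    	"fmt"
    	"sync"
    	"time"
    )

    func tearDown(id int) {
    	time.Sleep(10 * time.Millisecond) // stand-in for closing a transport
    	fmt.Println("subchannel", id, "torn down")
    }

    func main() {
    	conns := []int{1, 2, 3}

    	var wg sync.WaitGroup
    	for _, id := range conns {
    		wg.Add(1)
    		go func(id int) {
    			defer wg.Done()
    			tearDown(id)
    		}(id)
    	}
    	wg.Wait() // Close returns only after every subchannel has finished
    	fmt.Println("all subchannels closed")
    }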
@@ -1301,7 +1165,7 @@ type addrConn struct {
cc *ClientConn
dopts dialOptions
- acbw balancer.SubConn
+ acbw *acBalancerWrapper
scopts balancer.NewSubConnOptions
// transport is set when there's a viable transport (note: ac state may not be READY as LB channel
@@ -1310,19 +1174,21 @@ type addrConn struct {
// is received, transport is closed, ac has been torn down).
transport transport.ClientTransport // The current transport.
+ // This mutex is used on the RPC path, so its usage should be minimized as
+ // much as possible.
+ // TODO: Find a lock-free way to retrieve the transport and state from the
+ // addrConn.
mu sync.Mutex
curAddr resolver.Address // The current address.
addrs []resolver.Address // All addresses that the resolver resolved to.
// Use updateConnectivityState for updating addrConn's connectivity state.
- state connectivity.State
- stateChan chan struct{} // closed and recreated on every state change.
+ state connectivity.State
backoffIdx int // Needs to be stateful for resetConnectBackoff.
resetBackoff chan struct{}
- channelzID *channelz.Identifier
- czData *channelzData
+ channelz *channelz.SubChannel
}
// Note: this requires a lock on ac.mu.
@@ -1330,16 +1196,14 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
if ac.state == s {
return
}
- // When changing states, reset the state change channel.
- close(ac.stateChan)
- ac.stateChan = make(chan struct{})
ac.state = s
+ ac.channelz.ChannelMetrics.State.Store(&s)
if lastErr == nil {
- channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
+ channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v", s)
} else {
- channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
+ channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
}
- ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
+ ac.acbw.updateState(s, ac.curAddr, lastErr)
}
// adjustParams updates parameters used to create transports upon
@@ -1356,8 +1220,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
}
}
-func (ac *addrConn) resetTransport() {
- ac.mu.Lock()
+// resetTransportAndUnlock unconditionally connects the addrConn.
+//
+// ac.mu must be held by the caller, and this function will guarantee it is released.
+func (ac *addrConn) resetTransportAndUnlock() {
acCtx := ac.ctx
if acCtx.Err() != nil {
ac.mu.Unlock()
@@ -1388,6 +1254,8 @@ func (ac *addrConn) resetTransport() {
ac.mu.Unlock()
if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
+ // TODO: #7534 - Move re-resolution requests into the pick_first LB policy
+ // to ensure one resolution request per pass instead of per subconn failure.
ac.cc.resolveNow(resolver.ResolveNowOptions{})
ac.mu.Lock()
if acCtx.Err() != nil {
@@ -1429,12 +1297,13 @@ func (ac *addrConn) resetTransport() {
ac.mu.Unlock()
}
-// tryAllAddrs tries to creates a connection to the addresses, and stop when at
+// tryAllAddrs tries to create a connection to the addresses, and stop when at
// the first successful one. It returns an error if no address was successfully
// connected, or updates ac appropriately with the new transport.
func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
var firstConnErr error
for _, addr := range addrs {
+ ac.channelz.ChannelMetrics.Target.Store(&addr.Addr)
if ctx.Err() != nil {
return errConnClosing
}
@@ -1450,7 +1319,7 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c
}
ac.mu.Unlock()
- channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
+ channelz.Infof(logger, ac.channelz, "Subchannel picks a new address %q to connect", addr.Addr)
err := ac.createTransport(ctx, addr, copts, connectDeadline)
if err == nil {
@@ -1503,7 +1372,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address,
connectCtx, cancel := context.WithDeadline(ctx, connectDeadline)
defer cancel()
- copts.ChannelzParentID = ac.channelzID
+ copts.ChannelzParent = ac.channelz
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose)
if err != nil {
@@ -1512,7 +1381,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address,
}
// newTr is either nil, or closed.
hcancel()
- channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
+ channelz.Warningf(logger, ac.channelz, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
return err
}
@@ -1584,7 +1453,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
// The health package is not imported to set health check function.
//
// TODO: add a link to the health check doc in the error message.
- channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.")
+ channelz.Error(logger, ac.channelz, "Health check is requested but health check function is not set.")
return
}
@@ -1614,9 +1483,9 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) {
err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
if err != nil {
if status.Code(err) == codes.Unimplemented {
- channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled")
+ channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled")
} else {
- channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err)
+ channelz.Errorf(logger, ac.channelz, "Health checking failed: %v", err)
}
}
}()
@@ -1640,29 +1509,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport {
return nil
}
-// getTransport waits until the addrconn is ready and returns the transport.
-// If the context expires first, returns an appropriate status. If the
-// addrConn is stopped first, returns an Unavailable status error.
-func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
- for ctx.Err() == nil {
- ac.mu.Lock()
- t, state, sc := ac.transport, ac.state, ac.stateChan
- ac.mu.Unlock()
- if state == connectivity.Ready {
- return t, nil
- }
- if state == connectivity.Shutdown {
- return nil, status.Errorf(codes.Unavailable, "SubConn shutting down")
- }
-
- select {
- case <-ctx.Done():
- case <-sc:
- }
- }
- return nil, status.FromContextError(ctx.Err()).Err()
-}
-
// tearDown starts to tear down the addrConn.
//
// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
@@ -1681,18 +1527,18 @@ func (ac *addrConn) tearDown(err error) {
ac.cancel()
ac.curAddr = resolver.Address{}
- channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{
+ channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{
Desc: "Subchannel deleted",
Severity: channelz.CtInfo,
- Parent: &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()),
+ Parent: &channelz.TraceEvent{
+ Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelz.ID),
Severity: channelz.CtInfo,
},
})
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
// trace reference to the entity being deleted, and thus prevent it from
// being deleted right away.
- channelz.RemoveEntry(ac.channelzID)
+ channelz.RemoveEntry(ac.channelz.ID)
ac.mu.Unlock()
// We have to release the lock before the call to GracefulClose/Close here
@@ -1709,7 +1555,7 @@ func (ac *addrConn) tearDown(err error) {
} else {
// Hard close the transport when the channel is entering idle or is
// being shutdown. In the case where the channel is being shutdown,
- // closing of transports is also taken care of by cancelation of cc.ctx.
+ // closing of transports is also taken care of by cancellation of cc.ctx.
// But in the case where the channel is entering idle, we need to
// explicitly close the transports here. Instead of distinguishing
// between these two cases, it is simpler to close the transport
@@ -1719,39 +1565,6 @@ func (ac *addrConn) tearDown(err error) {
}
}
-func (ac *addrConn) getState() connectivity.State {
- ac.mu.Lock()
- defer ac.mu.Unlock()
- return ac.state
-}
-
-func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric {
- ac.mu.Lock()
- addr := ac.curAddr.Addr
- ac.mu.Unlock()
- return &channelz.ChannelInternalMetric{
- State: ac.getState(),
- Target: addr,
- CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted),
- CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded),
- CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed),
- LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)),
- }
-}
-
-func (ac *addrConn) incrCallsStarted() {
- atomic.AddInt64(&ac.czData.callsStarted, 1)
- atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano())
-}
-
-func (ac *addrConn) incrCallsSucceeded() {
- atomic.AddInt64(&ac.czData.callsSucceeded, 1)
-}
-
-func (ac *addrConn) incrCallsFailed() {
- atomic.AddInt64(&ac.czData.callsFailed, 1)
-}
-
type retryThrottler struct {
max float64
thresh float64
@@ -1789,12 +1602,17 @@ func (rt *retryThrottler) successfulRPC() {
}
}
-type channelzChannel struct {
- cc *ClientConn
+func (ac *addrConn) incrCallsStarted() {
+ ac.channelz.ChannelMetrics.CallsStarted.Add(1)
+ ac.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
+}
+
+func (ac *addrConn) incrCallsSucceeded() {
+ ac.channelz.ChannelMetrics.CallsSucceeded.Add(1)
}
-func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
- return c.cc.channelzMetric()
+func (ac *addrConn) incrCallsFailed() {
+ ac.channelz.ChannelMetrics.CallsFailed.Add(1)
}
// ErrClientConnTimeout indicates that the ClientConn cannot establish the
@@ -1828,22 +1646,19 @@ func (cc *ClientConn) connectionError() error {
return cc.lastConnectionError
}
-// parseTargetAndFindResolver parses the user's dial target and stores the
-// parsed target in `cc.parsedTarget`.
+// initParsedTargetAndResolverBuilder parses the user's dial target and stores
+// the parsed target in `cc.parsedTarget`.
//
// The resolver to use is determined based on the scheme in the parsed target
// and the same is stored in `cc.resolverBuilder`.
//
// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
-func (cc *ClientConn) parseTargetAndFindResolver() error {
- channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target)
+func (cc *ClientConn) initParsedTargetAndResolverBuilder() error {
+ logger.Infof("original dial target is: %q", cc.target)
var rb resolver.Builder
parsedTarget, err := parseTarget(cc.target)
- if err != nil {
- channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err)
- } else {
- channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
+ if err == nil {
rb = cc.getResolver(parsedTarget.URL.Scheme)
if rb != nil {
cc.parsedTarget = parsedTarget
@@ -1855,17 +1670,19 @@ func (cc *ClientConn) parseTargetAndFindResolver() error {
// We are here because the user's dial target did not contain a scheme or
// specified an unregistered scheme. We should fallback to the default
// scheme, except when a custom dialer is specified in which case, we should
- // always use passthrough scheme.
- defScheme := resolver.GetDefaultScheme()
- channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme)
+ // always use passthrough scheme. For either case, we need to respect any overridden
+ // global defaults set by the user.
+ defScheme := cc.dopts.defaultScheme
+ if internal.UserSetDefaultScheme {
+ defScheme = resolver.GetDefaultScheme()
+ }
+
canonicalTarget := defScheme + ":///" + cc.target
parsedTarget, err = parseTarget(canonicalTarget)
if err != nil {
- channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err)
return err
}
- channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget)
rb = cc.getResolver(parsedTarget.URL.Scheme)
if rb == nil {
return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
@@ -1887,6 +1704,8 @@ func parseTarget(target string) (resolver.Target, error) {
return resolver.Target{URL: *u}, nil
}
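The fallback logic above is where the dns-versus-passthrough default-scheme split between NewClient and Dial is actually applied. For readers less familiar with gRPC target strings, a few hedged examples of what explicit schemes look like (hosts and paths are placeholders):

    package main

    import "fmt"

    func main() {
    	// Explicit schemes avoid relying on the channel's default resolver:
    	targets := []string{
    		"dns:///api.example.com:443",    // DNS resolver (NewClient's default)
    		"passthrough:///10.0.0.7:50051", // hand the address to the dialer untouched (Dial's legacy default)
    		"unix:///var/run/app.sock",      // unix socket resolver
    	}
    	for _, t := range targets {
    		fmt.Println(t)
    	}
    }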
+// encodeAuthority escapes the authority string based on valid chars defined in
+// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2.
func encodeAuthority(authority string) string {
const upperhex = "0123456789ABCDEF"
@@ -1903,7 +1722,7 @@ func encodeAuthority(authority string) string {
return false
case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
return false
- case ':', '[', ']', '@': // Authority related delimeters
+ case ':', '[', ']', '@': // Authority related delimiters
return false
}
// Everything else must be escaped.
@@ -1953,7 +1772,7 @@ func encodeAuthority(authority string) string {
// credentials do not match the authority configured through the dial option.
//
// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
-func (cc *ClientConn) determineAuthority() error {
+func (cc *ClientConn) initAuthority() error {
dopts := cc.dopts
// Historically, we had two options for users to specify the serverName or
// authority for a channel. One was through the transport credentials
@@ -1975,58 +1794,16 @@ func (cc *ClientConn) determineAuthority() error {
}
endpoint := cc.parsedTarget.Endpoint()
- target := cc.target
- switch {
- case authorityFromDialOption != "":
+ if authorityFromDialOption != "" {
cc.authority = authorityFromDialOption
- case authorityFromCreds != "":
+ } else if authorityFromCreds != "" {
cc.authority = authorityFromCreds
- case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
- // TODO: remove when the unix resolver implements optional interface to
- // return channel authority.
- cc.authority = "localhost"
- case strings.HasPrefix(endpoint, ":"):
+ } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok {
+ cc.authority = auth.OverrideAuthority(cc.parsedTarget)
+ } else if strings.HasPrefix(endpoint, ":") {
cc.authority = "localhost" + endpoint
- default:
- // TODO: Define an optional interface on the resolver builder to return
- // the channel authority given the user's dial target. For resolvers
- // which don't implement this interface, we will use the endpoint from
- // "scheme://authority/endpoint" as the default authority.
- // Escape the endpoint to handle use cases where the endpoint
- // might not be a valid authority by default.
- // For example an endpoint which has multiple paths like
- // 'a/b/c', which is not a valid authority by default.
+ } else {
cc.authority = encodeAuthority(endpoint)
}
- channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
- return nil
-}
-
-// initResolverWrapper creates a ccResolverWrapper, which builds the name
-// resolver. This method grabs the lock to assign the newly built resolver
-// wrapper to the cc.resolverWrapper field.
-func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error {
- rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{
- target: cc.parsedTarget,
- builder: cc.resolverBuilder,
- bOpts: resolver.BuildOptions{
- DisableServiceConfig: cc.dopts.disableServiceConfig,
- DialCreds: creds,
- CredsBundle: cc.dopts.copts.CredsBundle,
- Dialer: cc.dopts.copts.Dialer,
- },
- channelzID: cc.channelzID,
- })
- if err != nil {
- return fmt.Errorf("failed to build resolver: %v", err)
- }
- // Resolver implementations may report state update or error inline when
- // built (or right after), and this is handled in cc.updateResolverState.
- // Also, an error from the resolver might lead to a re-resolution request
- // from the balancer, which is handled in resolveNow() where
- // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here.
- cc.mu.Lock()
- cc.resolverWrapper = rw
- cc.mu.Unlock()
return nil
}
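initAuthority now resolves the :authority value in a fixed precedence order: the WithAuthority dial option, then the transport credentials' override, then a resolver that implements AuthorityOverrider, and finally the (escaped) endpoint itself. A hedged sketch of the first and most common override path, using the public WithAuthority option with placeholder addresses:

    package main

    import (
    	"log"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"
    )

    func main() {
    	// Force the :authority / Host header sent on RPCs, e.g. when dialing an
    	// IP or a proxy but virtual-hosting by name on the server side.
    	conn, err := grpc.NewClient("dns:///10.20.30.40:443",
    		grpc.WithTransportCredentials(insecure.NewCredentials()),
    		grpc.WithAuthority("api.internal.example.com"))
    	if err != nil {
    		log.Fatalf("creating channel: %v", err)
    	}
    	defer conn.Close()
    }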
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
index 411e3dfd4..e840858b7 100644
--- a/vendor/google.golang.org/grpc/codec.go
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -21,18 +21,73 @@ package grpc
import (
"google.golang.org/grpc/encoding"
_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
+ "google.golang.org/grpc/mem"
)
-// baseCodec contains the functionality of both Codec and encoding.Codec, but
-// omits the name/string, which vary between the two and are not needed for
-// anything besides the registry in the encoding package.
+// baseCodec captures the new encoding.CodecV2 interface without the Name
+// function, allowing it to be implemented by older Codec and encoding.Codec
+// implementations. The omitted Name function is only needed for the register in
+// the encoding package and is not part of the core functionality.
type baseCodec interface {
- Marshal(v any) ([]byte, error)
- Unmarshal(data []byte, v any) error
+ Marshal(v any) (mem.BufferSlice, error)
+ Unmarshal(data mem.BufferSlice, v any) error
+}
+
+// getCodec returns an encoding.CodecV2 for the codec of the given name (if
+// registered). Initially checks the V2 registry with encoding.GetCodecV2 and
+// returns the V2 codec if it is registered. Otherwise, it checks the V1 registry
+// with encoding.GetCodec and if it is registered wraps it with newCodecV1Bridge
+// to turn it into an encoding.CodecV2. Returns nil otherwise.
+func getCodec(name string) encoding.CodecV2 {
+ if codecV1 := encoding.GetCodec(name); codecV1 != nil {
+ return newCodecV1Bridge(codecV1)
+ }
+
+ return encoding.GetCodecV2(name)
+}
+
+func newCodecV0Bridge(c Codec) baseCodec {
+ return codecV0Bridge{codec: c}
+}
+
+func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 {
+ return codecV1Bridge{
+ codecV0Bridge: codecV0Bridge{codec: c},
+ name: c.Name(),
+ }
+}
+
+var _ baseCodec = codecV0Bridge{}
+
+type codecV0Bridge struct {
+ codec interface {
+ Marshal(v any) ([]byte, error)
+ Unmarshal(data []byte, v any) error
+ }
+}
+
+func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) {
+ data, err := c.codec.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil
+}
+
+func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) {
+ return c.codec.Unmarshal(data.Materialize(), v)
}
-var _ baseCodec = Codec(nil)
-var _ baseCodec = encoding.Codec(nil)
+var _ encoding.CodecV2 = codecV1Bridge{}
+
+type codecV1Bridge struct {
+ codecV0Bridge
+ name string
+}
+
+func (c codecV1Bridge) Name() string {
+ return c.name
+}
// Codec defines the interface gRPC uses to encode and decode messages.
// Note that implementations of this interface must be thread safe;
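The bridge types above let the new mem.BufferSlice-based CodecV2 path coexist with codecs written against the older interfaces: a V1 encoding.Codec found in the registry is wrapped by newCodecV1Bridge rather than rejected. A hedged sketch of what such a V1 codec looks like on the application side (the JSON codec here is illustrative, not part of this change):

    package main

    import (
    	"encoding/json"

    	"google.golang.org/grpc/encoding"
    )

    // jsonCodec is a V1-style codec; after this change the client wraps it into
    // the CodecV2 shape internally via the codecV0/V1 bridges.
    type jsonCodec struct{}

    func (jsonCodec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
    func (jsonCodec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
    func (jsonCodec) Name() string                       { return "json" }

    func main() {
    	// Registration is unchanged; callers then select the codec per call or
    	// per channel, e.g. with grpc.CallContentSubtype("json").
    	encoding.RegisterCodec(jsonCodec{})
    }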
diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh
deleted file mode 100644
index 4cdc6ba7c..000000000
--- a/vendor/google.golang.org/grpc/codegen.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env bash
-
-# This script serves as an example to demonstrate how to generate the gRPC-Go
-# interface and the related messages from .proto file.
-#
-# It assumes the installation of i) Google proto buffer compiler at
-# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
-# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
-# not, please install them first.
-#
-# We recommend running this script at $GOPATH/src.
-#
-# If this is not what you need, feel free to make your own scripts. Again, this
-# script is for demonstration purpose.
-#
-proto=$1
-protoc --go_out=plugins=grpc:. $proto
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
index 11b106182..0b42c302b 100644
--- a/vendor/google.golang.org/grpc/codes/codes.go
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -25,7 +25,13 @@ import (
"strconv"
)
-// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
+// A Code is a status code defined according to the [gRPC documentation].
+//
+// Only the codes defined as consts in this package are valid codes. Do not use
+// other code values. Behavior of other codes is implementation-specific and
+// interoperability between implementations is not guaranteed.
+//
+// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
type Code uint32
const (
@@ -229,7 +235,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
if ci >= _maxCode {
- return fmt.Errorf("invalid code: %q", ci)
+ return fmt.Errorf("invalid code: %d", ci)
}
*c = Code(ci)
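The expanded Code documentation stresses that only the predefined constants are valid. On the application side these are almost always paired with the status package; a brief hedged example:

    package main

    import (
    	"fmt"

    	"google.golang.org/grpc/codes"
    	"google.golang.org/grpc/status"
    )

    func lookup(id string) error {
    	if id == "" {
    		// Attach a predefined code to the error; never invent numeric codes.
    		return status.Error(codes.InvalidArgument, "id must not be empty")
    	}
    	return status.Error(codes.NotFound, "no such record")
    }

    func main() {
    	err := lookup("")
    	fmt.Println(status.Code(err)) // InvalidArgument
    	fmt.Println(err)              // rpc error: code = InvalidArgument desc = id must not be empty
    }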
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index 5feac3aa0..665e790bb 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -28,9 +28,9 @@ import (
"fmt"
"net"
- "github.com/golang/protobuf/proto"
"google.golang.org/grpc/attributes"
icredentials "google.golang.org/grpc/internal/credentials"
+ "google.golang.org/protobuf/proto"
)
// PerRPCCredentials defines the common interface for the credentials which need to
@@ -237,7 +237,7 @@ func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
}
// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
-// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
+// It returns success if 1) the condition is satisfied or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
//
// This API is experimental.
diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
index 82bee1443..4c805c644 100644
--- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
+++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
@@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials {
// NoSecurity.
type insecureTC struct{}
-func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
}
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
index 877b7cd21..e163a473d 100644
--- a/vendor/google.golang.org/grpc/credentials/tls.go
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -27,9 +27,13 @@ import (
"net/url"
"os"
+ "google.golang.org/grpc/grpclog"
credinternal "google.golang.org/grpc/internal/credentials"
+ "google.golang.org/grpc/internal/envconfig"
)
+var logger = grpclog.Component("credentials")
+
// TLSInfo contains the auth information for a TLS authenticated connection.
// It implements the AuthInfo interface.
type TLSInfo struct {
@@ -44,10 +48,25 @@ func (t TLSInfo) AuthType() string {
return "tls"
}
+// cipherSuiteLookup returns the string version of a TLS cipher suite ID.
+func cipherSuiteLookup(cipherSuiteID uint16) string {
+ for _, s := range tls.CipherSuites() {
+ if s.ID == cipherSuiteID {
+ return s.Name
+ }
+ }
+ for _, s := range tls.InsecureCipherSuites() {
+ if s.ID == cipherSuiteID {
+ return s.Name
+ }
+ }
+ return fmt.Sprintf("unknown ID: %v", cipherSuiteID)
+}
+
// GetSecurityValue returns security info requested by channelz.
func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
v := &TLSChannelzSecurityValue{
- StandardName: cipherSuiteLookup[t.State.CipherSuite],
+ StandardName: cipherSuiteLookup(t.State.CipherSuite),
}
// Currently there's no way to get LocalCertificate info from tls package.
if len(t.State.PeerCertificates) > 0 {
@@ -97,6 +116,22 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon
conn.Close()
return nil, nil, ctx.Err()
}
+
+ // The negotiated protocol can be either of the following:
+ // 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since
+ // it is the only protocol advertised by the client during the handshake.
+ // The tls library ensures that the server chooses a protocol advertised
+ // by the client.
+ // 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement
+ // for using HTTP/2 over TLS. We can terminate the connection immediately.
+ np := conn.ConnectionState().NegotiatedProtocol
+ if np == "" {
+ if envconfig.EnforceALPNEnabled {
+ conn.Close()
+ return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
+ }
+ logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName)
+ }
tlsInfo := TLSInfo{
State: conn.ConnectionState(),
CommonAuthInfo: CommonAuthInfo{
@@ -116,8 +151,20 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
conn.Close()
return nil, nil, err
}
+ cs := conn.ConnectionState()
+ // The negotiated application protocol can be empty only if the client doesn't
+ // support ALPN. In such cases, we can close the connection since ALPN is required
+ // for using HTTP/2 over TLS.
+ if cs.NegotiatedProtocol == "" {
+ if envconfig.EnforceALPNEnabled {
+ conn.Close()
+ return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
+ } else if logger.V(2) {
+ logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases")
+ }
+ }
tlsInfo := TLSInfo{
- State: conn.ConnectionState(),
+ State: cs,
CommonAuthInfo: CommonAuthInfo{
SecurityLevel: PrivacyAndIntegrity,
},
@@ -138,11 +185,55 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
return nil
}
+// The following cipher suites are forbidden for use with HTTP/2 by
+// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
+var tls12ForbiddenCipherSuites = map[uint16]struct{}{
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA: {},
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA: {},
+ tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {},
+ tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {},
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {},
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {},
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {},
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {},
+}
+
// NewTLS uses c to construct a TransportCredentials based on TLS.
func NewTLS(c *tls.Config) TransportCredentials {
- tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
- tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
- return tc
+ config := applyDefaults(c)
+ if config.GetConfigForClient != nil {
+ oldFn := config.GetConfigForClient
+ config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
+ cfgForClient, err := oldFn(hello)
+ if err != nil || cfgForClient == nil {
+ return cfgForClient, err
+ }
+ return applyDefaults(cfgForClient), nil
+ }
+ }
+ return &tlsCreds{config: config}
+}
+
+func applyDefaults(c *tls.Config) *tls.Config {
+ config := credinternal.CloneTLSConfig(c)
+ config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos)
+ // If the user did not configure a MinVersion and did not configure a
+ // MaxVersion < 1.2, use MinVersion=1.2, which is required by
+ // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2
+ if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) {
+ config.MinVersion = tls.VersionTLS12
+ }
+ // If the user did not configure CipherSuites, use all "secure" cipher
+ // suites reported by the TLS package, but remove some explicitly forbidden
+ // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
+ if config.CipherSuites == nil {
+ for _, cs := range tls.CipherSuites() {
+ if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok {
+ config.CipherSuites = append(config.CipherSuites, cs.ID)
+ }
+ }
+ }
+ return config
}
// NewClientTLSFromCert constructs TLS credentials from the provided root
@@ -205,32 +296,3 @@ type TLSChannelzSecurityValue struct {
LocalCertificate []byte
RemoteCertificate []byte
}
-
-var cipherSuiteLookup = map[uint16]string{
- tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
- tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
- tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
- tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
- tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
- tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
- tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256",
- tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384",
- tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256",
-}
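NewTLS now normalizes the caller's tls.Config for HTTP/2: "h2" is advertised via ALPN, TLS 1.2 becomes the floor unless an older MaxVersion was pinned, and the RFC 7540 Appendix A suites are filtered out of the default list. A standalone sketch of that normalization using only crypto/tls (a simplification of applyDefaults above; only two forbidden suites are listed for brevity):

    package main

    import (
        "crypto/tls"
        "fmt"
    )

    // h2Defaults is a simplified, user-side imitation of applyDefaults.
    func h2Defaults(c *tls.Config) *tls.Config {
        cfg := c.Clone()
        cfg.NextProtos = append(cfg.NextProtos, "h2") // HTTP/2 requires ALPN
        if cfg.MinVersion == 0 && (cfg.MaxVersion == 0 || cfg.MaxVersion >= tls.VersionTLS12) {
            cfg.MinVersion = tls.VersionTLS12
        }
        // Subset of the RFC 7540 Appendix A blacklist, for illustration only.
        forbidden := map[uint16]bool{
            tls.TLS_RSA_WITH_AES_128_GCM_SHA256:    true,
            tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: true,
        }
        if cfg.CipherSuites == nil {
            for _, cs := range tls.CipherSuites() {
                if !forbidden[cs.ID] {
                    cfg.CipherSuites = append(cfg.CipherSuites, cs.ID)
                }
            }
        }
        return cfg
    }

    func main() {
        cfg := h2Defaults(&tls.Config{})
        fmt.Println(cfg.NextProtos, cfg.MinVersion == tls.VersionTLS12, len(cfg.CipherSuites))
    }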
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 1fd0d5c12..518692c3a 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -21,6 +21,7 @@ package grpc
import (
"context"
"net"
+ "net/url"
"time"
"google.golang.org/grpc/backoff"
@@ -32,10 +33,16 @@ import (
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
)
+const (
+ // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges
+ defaultMaxCallAttempts = 5
+)
+
func init() {
internal.AddGlobalDialOptions = func(opt ...DialOption) {
globalDialOptions = append(globalDialOptions, opt...)
@@ -43,9 +50,18 @@ func init() {
internal.ClearGlobalDialOptions = func() {
globalDialOptions = nil
}
+ internal.AddGlobalPerTargetDialOptions = func(opt any) {
+ if ptdo, ok := opt.(perTargetDialOption); ok {
+ globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo)
+ }
+ }
+ internal.ClearGlobalPerTargetDialOptions = func() {
+ globalPerTargetDialOptions = nil
+ }
internal.WithBinaryLogger = withBinaryLogger
internal.JoinDialOptions = newJoinDialOption
internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
+ internal.WithBufferPool = withBufferPool
}
// dialOptions configure a Dial call. dialOptions are set by the DialOption
@@ -63,12 +79,11 @@ type dialOptions struct {
block bool
returnLastError bool
timeout time.Duration
- scChan <-chan ServiceConfig
authority string
binaryLogger binarylog.Logger
copts transport.ConnectOptions
callOptions []CallOption
- channelzParentID *channelz.Identifier
+ channelzParent channelz.Identifier
disableServiceConfig bool
disableRetry bool
disableHealthCheck bool
@@ -78,7 +93,8 @@ type dialOptions struct {
defaultServiceConfigRawJSON *string
resolvers []resolver.Builder
idleTimeout time.Duration
- recvBufferPool SharedBufferPool
+ defaultScheme string
+ maxCallAttempts int
}
// DialOption configures how we set up the connection.
@@ -88,6 +104,19 @@ type DialOption interface {
var globalDialOptions []DialOption
+// perTargetDialOption takes a parsed target and returns a dial option to apply.
+//
+// This gets called after NewClient() parses the target, and allows per-target
+// configuration to be set through a returned DialOption. The DialOption will
+// not take effect if it specifies a resolver builder, as that DialOption is
+// factored in while parsing the target.
+type perTargetDialOption interface {
+ // DialOption returns a Dial Option to apply.
+ DialOptionForTarget(parsedTarget url.URL) DialOption
+}
+
+var globalPerTargetDialOptions []perTargetDialOption
+
// EmptyDialOption does not alter the dial configuration. It can be embedded in
// another structure to build custom dial options.
//
@@ -154,9 +183,7 @@ func WithSharedWriteBuffer(val bool) DialOption {
}
// WithWriteBufferSize determines how much data can be batched before doing a
-// write on the wire. The corresponding memory allocation for this buffer will
-// be twice the size to keep syscalls low. The default value for this buffer is
-// 32KB.
+// write on the wire. The default value for this buffer is 32KB.
//
// Zero or negative values will disable the write buffer such that each write
// will be on underlying connection. Note: A Send call may not directly
@@ -250,19 +277,6 @@ func WithDecompressor(dc Decompressor) DialOption {
})
}
-// WithServiceConfig returns a DialOption which has a channel to read the
-// service configuration.
-//
-// Deprecated: service config should be received through name resolver or via
-// WithDefaultServiceConfig, as specified at
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be
-// removed in a future 1.x release.
-func WithServiceConfig(c <-chan ServiceConfig) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.scChan = c
- })
-}
-
// WithConnectParams configures the ClientConn to use the provided ConnectParams
// for creating and maintaining connections to servers.
//
@@ -314,6 +328,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption {
//
// Use of this feature is not recommended. For more information, please see:
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
+//
+// Deprecated: this DialOption is not supported by NewClient.
+// Will be supported throughout 1.x.
func WithBlock() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.block = true
@@ -328,10 +345,8 @@ func WithBlock() DialOption {
// Use of this feature is not recommended. For more information, please see:
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
+// Deprecated: this DialOption is not supported by NewClient.
+// Will be supported throughout 1.x.
func WithReturnConnectionError() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.block = true
@@ -401,8 +416,8 @@ func WithCredentialsBundle(b credentials.Bundle) DialOption {
// WithTimeout returns a DialOption that configures a timeout for dialing a
// ClientConn initially. This is valid if and only if WithBlock() is present.
//
-// Deprecated: use DialContext instead of Dial and context.WithTimeout
-// instead. Will be supported throughout 1.x.
+// Deprecated: this DialOption is not supported by NewClient.
+// Will be supported throughout 1.x.
func WithTimeout(d time.Duration) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.timeout = d
@@ -413,6 +428,17 @@ func WithTimeout(d time.Duration) DialOption {
// connections. If FailOnNonTempDialError() is set to true, and an error is
// returned by f, gRPC checks the error's Temporary() method to decide if it
// should try to reconnect to the network address.
+//
+// Note: All supported releases of Go (as of December 2023) override the OS
+// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+// with OS defaults for keepalive time and interval, use a net.Dialer that sets
+// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
+// option to true from the Control field. For a concrete example of how to do
+// this, see internal.NetDialerWithTCPKeepalive().
+//
+// For more information, please see [issue 23459] in the Go GitHub repo.
+//
+// [issue 23459]: https://github.com/golang/go/issues/23459
func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.Dialer = f
@@ -473,9 +499,8 @@ func withBinaryLogger(bl binarylog.Logger) DialOption {
// Use of this feature is not recommended. For more information, please see:
// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// Deprecated: this DialOption is not supported by NewClient.
+// This API may be changed or removed in a
// later release.
func FailOnNonTempDialError(f bool) DialOption {
return newFuncDialOption(func(o *dialOptions) {
@@ -487,12 +512,14 @@ func FailOnNonTempDialError(f bool) DialOption {
// the RPCs.
func WithUserAgent(s string) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.copts.UserAgent = s
+ o.copts.UserAgent = s + " " + grpcUA
})
}
// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
// for the client transport.
+//
+// Keepalive is disabled by default.
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
if kp.Time < internal.KeepaliveMinPingTime {
logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
@@ -557,9 +584,9 @@ func WithAuthority(a string) DialOption {
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
-func WithChannelzParentID(id *channelz.Identifier) DialOption {
+func WithChannelzParentID(c channelz.Identifier) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.channelzParentID = id
+ o.channelzParent = c
})
}
@@ -604,12 +631,22 @@ func WithDisableRetry() DialOption {
})
}
+// MaxHeaderListSizeDialOption is a DialOption that specifies the maximum
+// (uncompressed) size of header list that the client is prepared to accept.
+type MaxHeaderListSizeDialOption struct {
+ MaxHeaderListSize uint32
+}
+
+func (o MaxHeaderListSizeDialOption) apply(do *dialOptions) {
+ do.copts.MaxHeaderListSize = &o.MaxHeaderListSize
+}
+
// WithMaxHeaderListSize returns a DialOption that specifies the maximum
// (uncompressed) size of header list that the client is prepared to accept.
func WithMaxHeaderListSize(s uint32) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.MaxHeaderListSize = &s
- })
+ return MaxHeaderListSizeDialOption{
+ MaxHeaderListSize: s,
+ }
}
// WithDisableHealthCheck disables the LB channel health checking for all
@@ -637,17 +674,22 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption {
func defaultDialOptions() dialOptions {
return dialOptions{
- healthCheckFunc: internal.HealthCheckFunc,
copts: transport.ConnectOptions{
- WriteBufferSize: defaultWriteBufSize,
ReadBufferSize: defaultReadBufSize,
+ WriteBufferSize: defaultWriteBufSize,
UseProxy: true,
+ UserAgent: grpcUA,
+ BufferPool: mem.DefaultBufferPool(),
},
- recvBufferPool: nopBufferPool{},
+ bs: internalbackoff.DefaultExponential,
+ healthCheckFunc: internal.HealthCheckFunc,
+ idleTimeout: 30 * time.Minute,
+ defaultScheme: "dns",
+ maxCallAttempts: defaultMaxCallAttempts,
}
}
-// withGetMinConnectDeadline specifies the function that clientconn uses to
+// withMinConnectDeadline specifies the function that clientconn uses to
// get minConnectDeadline. This can be used to make connection attempts happen
// faster/slower.
//
@@ -658,6 +700,14 @@ func withMinConnectDeadline(f func() time.Duration) DialOption {
})
}
+// withDefaultScheme is used to allow Dial to use "passthrough" as the default
+// name resolver, while NewClient uses "dns" otherwise.
+func withDefaultScheme(s string) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.defaultScheme = s
+ })
+}
+
// WithResolvers allows a list of resolver implementations to be registered
// locally with the ClientConn without needing to be globally registered via
// resolver.Register. They will be matched against the scheme used for the
@@ -680,8 +730,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption {
// channel will exit idle mode when the Connect() method is called or when an
// RPC is initiated.
//
-// By default this feature is disabled, which can also be explicitly configured
-// by passing zero to this function.
+// A default timeout of 30 minutes will be used if this dial option is not set
+// at dial time and idleness can be disabled by passing a timeout of zero.
//
// # Experimental
//
@@ -693,23 +743,25 @@ func WithIdleTimeout(d time.Duration) DialOption {
})
}
-// WithRecvBufferPool returns a DialOption that configures the ClientConn
-// to use the provided shared buffer pool for parsing incoming messages. Depending
-// on the application's workload, this could result in reduced memory allocation.
-//
-// If you are unsure about how to implement a memory pool but want to utilize one,
-// begin with grpc.NewSharedBufferPool.
-//
-// Note: The shared buffer pool feature will not be active if any of the following
-// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
-// cases, the shared buffer pool will be ignored.
+// WithMaxCallAttempts returns a DialOption that configures the maximum number
+// of attempts per call (including retries and hedging) using the channel.
+// Service owners may specify a higher value for these parameters, but higher
+// values will be treated as equal to the maximum value by the client
+// implementation. This mitigates security concerns related to the service
+// config being transferred to the client via DNS.
//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
-func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption {
+// A value of 5 will be used if this dial option is not set or n < 2.
+func WithMaxCallAttempts(n int) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ if n < 2 {
+ n = defaultMaxCallAttempts
+ }
+ o.maxCallAttempts = n
+ })
+}
+
+func withBufferPool(bufferPool mem.BufferPool) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.recvBufferPool = bufferPool
+ o.copts.BufferPool = bufferPool
})
}
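Several of the options in this file interact: NewClient defaults to the "dns" scheme, channels idle out after 30 minutes unless WithIdleTimeout overrides it, and retry/hedging attempts are capped at 5 unless WithMaxCallAttempts supplies a value of at least 2. A hedged usage sketch (the target address and option values are placeholders):

    package main

    import (
        "log"
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        "google.golang.org/grpc/keepalive"
    )

    func main() {
        conn, err := grpc.NewClient(
            "dns:///greeter.internal:50051", // placeholder target
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithMaxCallAttempts(3),          // values < 2 fall back to the default of 5
            grpc.WithIdleTimeout(10*time.Minute), // 0 disables idleness entirely
            grpc.WithKeepaliveParams(keepalive.ClientParameters{Time: 30 * time.Second}),
        )
        if err != nil {
            log.Fatalf("NewClient: %v", err)
        }
        defer conn.Close()
    }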
diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go
index 0022859ad..e7b532b6f 100644
--- a/vendor/google.golang.org/grpc/doc.go
+++ b/vendor/google.golang.org/grpc/doc.go
@@ -16,7 +16,7 @@
*
*/
-//go:generate ./regenerate.sh
+//go:generate ./scripts/regenerate.sh
/*
Package grpc implements an RPC system called gRPC.
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 69d5580b6..11d0ae142 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -38,6 +38,10 @@ const Identity = "identity"
// Compressor is used for compressing and decompressing when sending or
// receiving messages.
+//
+// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`,
+// gRPC will invoke it to determine the size of the buffer allocated for the
+// result of decompression. A return value of -1 indicates unknown size.
type Compressor interface {
// Compress writes the data written to wc to w after compressing it. If an
// error occurs while initializing the compressor, that error is returned
@@ -51,15 +55,6 @@ type Compressor interface {
// coding header. The result must be static; the result cannot change
// between calls.
Name() string
- // If a Compressor implements
- // DecompressedSize(compressedBytes []byte) int, gRPC will call it
- // to determine the size of the buffer allocated for the result of decompression.
- // Return -1 to indicate unknown size.
- //
- // Experimental
- //
- // Notice: This API is EXPERIMENTAL and may be changed or removed in a
- // later release.
}
var registeredCompressor = make(map[string]Compressor)
@@ -99,7 +94,7 @@ type Codec interface {
Name() string
}
-var registeredCodecs = make(map[string]Codec)
+var registeredCodecs = make(map[string]any)
// RegisterCodec registers the provided Codec for use with all gRPC clients and
// servers.
@@ -131,5 +126,6 @@ func RegisterCodec(codec Codec) {
//
// The content-subtype is expected to be lowercase.
func GetCodec(contentSubtype string) Codec {
- return registeredCodecs[contentSubtype]
+ c, _ := registeredCodecs[contentSubtype].(Codec)
+ return c
}
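The Compressor doc comment now records the optional DecompressedSize hook in the interface documentation itself. A hedged sketch of a compressor that advertises it, reading the uncompressed size from the gzip trailer's ISIZE field (this is a simplified stand-in, not grpc-go's stock encoding/gzip package):

    package gzipsketch // hypothetical package

    import (
        "compress/gzip"
        "encoding/binary"
        "io"

        "google.golang.org/grpc/encoding"
    )

    type gzipCompressor struct{}

    func (gzipCompressor) Name() string { return "gzip" }

    func (gzipCompressor) Compress(w io.Writer) (io.WriteCloser, error) {
        return gzip.NewWriter(w), nil
    }

    func (gzipCompressor) Decompress(r io.Reader) (io.Reader, error) {
        return gzip.NewReader(r)
    }

    // DecompressedSize reports the uncompressed size from the 4-byte ISIZE
    // trailer so gRPC can size the destination buffer up front. It returns -1
    // when the size cannot be determined.
    func (gzipCompressor) DecompressedSize(compressed []byte) int {
        const trailerLen = 4
        if len(compressed) < trailerLen {
            return -1
        }
        return int(binary.LittleEndian.Uint32(compressed[len(compressed)-trailerLen:]))
    }

    func init() {
        encoding.RegisterCompressor(gzipCompressor{})
    }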
diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go
new file mode 100644
index 000000000..074c5e234
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package encoding
+
+import (
+ "strings"
+
+ "google.golang.org/grpc/mem"
+)
+
+// CodecV2 defines the interface gRPC uses to encode and decode messages. Note
+// that implementations of this interface must be thread safe; a CodecV2's
+// methods can be called from concurrent goroutines.
+type CodecV2 interface {
+ // Marshal returns the wire format of v. The buffers in the returned
+ // [mem.BufferSlice] must have at least one reference each, which will be freed
+ // by gRPC when they are no longer needed.
+ Marshal(v any) (out mem.BufferSlice, err error)
+ // Unmarshal parses the wire format into v. Note that data will be freed as soon
+ // as this function returns. If the codec wishes to guarantee access to the data
+ // after this function, it must take its own reference that it frees when it is
+ // no longer needed.
+ Unmarshal(data mem.BufferSlice, v any) error
+ // Name returns the name of the Codec implementation. The returned string
+ // will be used as part of content type in transmission. The result must be
+ // static; the result cannot change between calls.
+ Name() string
+}
+
+// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and
+// servers.
+//
+// The CodecV2 will be stored and looked up by result of its Name() method, which
+// should match the content-subtype of the encoding handled by the CodecV2. This
+// is case-insensitive, and is stored and looked up as lowercase. If the
+// result of calling Name() is an empty string, RegisterCodecV2 will panic. See
+// Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If both a Codec and CodecV2 are registered with the same name, the CodecV2
+// will be used.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Codecs are
+// registered with the same name, the one registered last will take effect.
+func RegisterCodecV2(codec CodecV2) {
+ if codec == nil {
+ panic("cannot register a nil CodecV2")
+ }
+ if codec.Name() == "" {
+ panic("cannot register CodecV2 with empty string result for Name()")
+ }
+ contentSubtype := strings.ToLower(codec.Name())
+ registeredCodecs[contentSubtype] = codec
+}
+
+// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
+func GetCodecV2(contentSubtype string) CodecV2 {
+ c, _ := registeredCodecs[contentSubtype].(CodecV2)
+ return c
+}
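For comparison with the proto codec below, a minimal CodecV2 can be written against any marshaller. This sketch registers a JSON codec under the hypothetical content-subtype "json"; it skips buffer pooling and simply materializes the payload:

    package jsoncodec // hypothetical package

    import (
        "encoding/json"
        "fmt"

        "google.golang.org/grpc/encoding"
        "google.golang.org/grpc/mem"
    )

    type jsonCodecV2 struct{}

    func (jsonCodecV2) Name() string { return "json" }

    func (jsonCodecV2) Marshal(v any) (mem.BufferSlice, error) {
        b, err := json.Marshal(v)
        if err != nil {
            return nil, fmt.Errorf("json codec: %w", err)
        }
        // SliceBuffer wraps a plain []byte without pooling; fine for a sketch.
        return mem.BufferSlice{mem.SliceBuffer(b)}, nil
    }

    func (jsonCodecV2) Unmarshal(data mem.BufferSlice, v any) error {
        // Materialize flattens the buffer slice into a single []byte copy.
        return json.Unmarshal(data.Materialize(), v)
    }

    func init() {
        encoding.RegisterCodecV2(jsonCodecV2{})
    }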
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
index 0ee3d3bae..ceec319dd 100644
--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2018 gRPC authors.
+ * Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -23,36 +23,74 @@ package proto
import (
"fmt"
- "github.com/golang/protobuf/proto"
"google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/mem"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/protoadapt"
)
// Name is the name registered for the proto compressor.
const Name = "proto"
func init() {
- encoding.RegisterCodec(codec{})
+ encoding.RegisterCodecV2(&codecV2{})
}
-// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
-type codec struct{}
+// codecV2 is a CodecV2 implementation with protobuf. It is the default codec for
+// gRPC.
+type codecV2 struct{}
-func (codec) Marshal(v any) ([]byte, error) {
- vv, ok := v.(proto.Message)
- if !ok {
- return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
+func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
+ vv := messageV2Of(v)
+ if vv == nil {
+ return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
}
- return proto.Marshal(vv)
+
+ size := proto.Size(vv)
+ if mem.IsBelowBufferPoolingThreshold(size) {
+ buf, err := proto.Marshal(vv)
+ if err != nil {
+ return nil, err
+ }
+ data = append(data, mem.SliceBuffer(buf))
+ } else {
+ pool := mem.DefaultBufferPool()
+ buf := pool.Get(size)
+ if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
+ pool.Put(buf)
+ return nil, err
+ }
+ data = append(data, mem.NewBuffer(buf, pool))
+ }
+
+ return data, nil
}
-func (codec) Unmarshal(data []byte, v any) error {
- vv, ok := v.(proto.Message)
- if !ok {
+func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
+ vv := messageV2Of(v)
+ if vv == nil {
return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
}
- return proto.Unmarshal(data, vv)
+
+ buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+ defer buf.Free()
+ // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not
+ // really possible without a major overhaul of the proto package, but the
+ // vtprotobuf library may be able to support this.
+ return proto.Unmarshal(buf.ReadOnlyData(), vv)
+}
+
+func messageV2Of(v any) proto.Message {
+ switch v := v.(type) {
+ case protoadapt.MessageV1:
+ return protoadapt.MessageV2Of(v)
+ case protoadapt.MessageV2:
+ return v
+ }
+
+ return nil
}
-func (codec) Name() string {
+func (c *codecV2) Name() string {
return Name
}
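Because the codec converts through protoadapt, callers can keep passing messages generated against the legacy github.com/golang/protobuf API as well as the new google.golang.org/protobuf API. A hedged sketch of the same conversion in application code (helper name is hypothetical):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/protoadapt"
        "google.golang.org/protobuf/types/known/emptypb"
    )

    // asV2 mirrors messageV2Of: it accepts either protobuf API generation and
    // returns nil for anything that is not a proto message.
    func asV2(v any) proto.Message {
        switch m := v.(type) {
        case protoadapt.MessageV1:
            return protoadapt.MessageV2Of(m) // wraps, or returns as-is for dual-API types
        case protoadapt.MessageV2:
            return m
        }
        return nil
    }

    func main() {
        fmt.Println(asV2(&emptypb.Empty{}) != nil) // true
        fmt.Println(asV2("not a message") == nil)  // true
    }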
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
new file mode 100644
index 000000000..1d827dd5d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -0,0 +1,269 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+ "maps"
+
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal"
+)
+
+func init() {
+ internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting
+}
+
+var logger = grpclog.Component("metrics-registry")
+
+// DefaultMetrics are the default metrics registered through global metrics
+// registry. This is written to at initialization time only, and is read only
+// after initialization.
+var DefaultMetrics = NewMetrics()
+
+// MetricDescriptor is the data for a registered metric.
+type MetricDescriptor struct {
+ // The name of this metric. This name must be unique across the whole binary
+ // (including any per call metrics). See
+ // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
+ // for metric naming conventions.
+ Name Metric
+ // The description of this metric.
+ Description string
+ // The unit (e.g. entries, seconds) of this metric.
+ Unit string
+ // The required label keys for this metric. These are intended to be
+ // attached to metrics emitted from a stats handler.
+ Labels []string
+ // The optional label keys for this metric. These are intended to be attached
+ // to metrics emitted from a stats handler if configured.
+ OptionalLabels []string
+ // Whether this metric is on by default.
+ Default bool
+ // The type of metric. This is set by the metric registry, and not intended
+ // to be set by a component registering a metric.
+ Type MetricType
+ // Bounds are the bounds of this metric. This only applies to histogram
+ // metrics. If unset or set with length 0, stats handlers will fall back to
+ // default bounds.
+ Bounds []float64
+}
+
+// MetricType is the type of metric.
+type MetricType int
+
+// Type of metric supported by this instrument registry.
+const (
+ MetricTypeIntCount MetricType = iota
+ MetricTypeFloatCount
+ MetricTypeIntHisto
+ MetricTypeFloatHisto
+ MetricTypeIntGauge
+)
+
+// Int64CountHandle is a typed handle for an int count metric. This handle
+// is passed at the recording point in order to know which metric to record
+// on.
+type Int64CountHandle MetricDescriptor
+
+// Descriptor returns the int64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64CountHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 count value on the metrics recorder provided.
+func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Count(h, incr, labels...)
+}
+
+// Float64CountHandle is a typed handle for a float count metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Float64CountHandle MetricDescriptor
+
+// Descriptor returns the float64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64CountHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 count value on the metrics recorder provided.
+func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+ recorder.RecordFloat64Count(h, incr, labels...)
+}
+
+// Int64HistoHandle is a typed handle for an int histogram metric. This handle
+// is passed at the recording point in order to know which metric to record on.
+type Int64HistoHandle MetricDescriptor
+
+// Descriptor returns the int64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64HistoHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 histo value on the metrics recorder provided.
+func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Histo(h, incr, labels...)
+}
+
+// Float64HistoHandle is a typed handle for a float histogram metric. This
+// handle is passed at the recording point in order to know which metric to
+// record on.
+type Float64HistoHandle MetricDescriptor
+
+// Descriptor returns the float64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64HistoHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 histo value on the metrics recorder provided.
+func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+ recorder.RecordFloat64Histo(h, incr, labels...)
+}
+
+// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Int64GaugeHandle MetricDescriptor
+
+// Descriptor returns the int64 gauge handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 gauge value on the metrics recorder provided.
+func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Gauge(h, incr, labels...)
+}
+
+// registeredMetrics are the registered metric descriptor names.
+var registeredMetrics = make(map[Metric]bool)
+
+// metricsRegistry contains all of the registered metrics.
+//
+// This is written to only at init time, and read only after that.
+var metricsRegistry = make(map[Metric]*MetricDescriptor)
+
+// DescriptorForMetric returns the MetricDescriptor from the global registry.
+//
+// Returns nil if MetricDescriptor not present.
+func DescriptorForMetric(metric Metric) *MetricDescriptor {
+ return metricsRegistry[metric]
+}
+
+func registerMetric(name Metric, def bool) {
+ if registeredMetrics[name] {
+ logger.Fatalf("metric %v already registered", name)
+ }
+ registeredMetrics[name] = true
+ if def {
+ DefaultMetrics = DefaultMetrics.Add(name)
+ }
+}
+
+// RegisterInt64Count registers the metric description onto the global registry.
+// It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntCount
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64CountHandle)(descPtr)
+}
+
+// RegisterFloat64Count registers the metric description onto the global
+// registry. It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeFloatCount
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Float64CountHandle)(descPtr)
+}
+
+// RegisterInt64Histo registers the metric description onto the global registry.
+// It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntHisto
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64HistoHandle)(descPtr)
+}
+
+// RegisterFloat64Histo registers the metric description onto the global
+// registry. It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeFloatHisto
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Float64HistoHandle)(descPtr)
+}
+
+// RegisterInt64Gauge registers the metric description onto the global registry.
+// It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntGauge
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64GaugeHandle)(descPtr)
+}
+
+// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
+// registry. Returns a cleanup function that sets the metrics registry to its
+// original state.
+func snapshotMetricsRegistryForTesting() func() {
+ oldDefaultMetrics := DefaultMetrics
+ oldRegisteredMetrics := registeredMetrics
+ oldMetricsRegistry := metricsRegistry
+
+ registeredMetrics = make(map[Metric]bool)
+ metricsRegistry = make(map[Metric]*MetricDescriptor)
+ maps.Copy(registeredMetrics, oldRegisteredMetrics)
+ maps.Copy(metricsRegistry, oldMetricsRegistry)
+
+ return func() {
+ DefaultMetrics = oldDefaultMetrics
+ registeredMetrics = oldRegisteredMetrics
+ metricsRegistry = oldMetricsRegistry
+ }
+}
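The registry is populated only at init time; after that the typed handles are the recording surface. A hedged usage sketch built on the exported API above (the metric name, label key, and package name are hypothetical):

    package mymetrics // hypothetical package

    import (
        "google.golang.org/grpc/experimental/stats"
    )

    // Registration must happen during package initialization; the registry is
    // not thread-safe afterwards and re-registering a name panics.
    var retriesMetric = stats.RegisterInt64Count(stats.MetricDescriptor{
        Name:        "example.com/client/manual_retries", // hypothetical metric name
        Description: "Number of manual retries performed by the example client.",
        Unit:        "retry",
        Labels:      []string{"grpc.method"},
        Default:     false, // not added to DefaultMetrics
    })

    // recordRetry records one retry on whatever MetricsRecorder the stats
    // plugin provides (for example, an OpenTelemetry-backed recorder).
    func recordRetry(rec stats.MetricsRecorder, method string) {
        retriesMetric.Record(rec, 1, method)
    }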
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
new file mode 100644
index 000000000..3221f7a63
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -0,0 +1,114 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats contains experimental metrics/stats APIs.
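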
+package stats
+
+import "maps"
+
+// MetricsRecorder records on metrics derived from metric registry.
+type MetricsRecorder interface {
+ // RecordInt64Count records the measurement alongside labels on the int
+ // count associated with the provided handle.
+ RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
+ // RecordFloat64Count records the measurement alongside labels on the float
+ // count associated with the provided handle.
+ RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
+ // RecordInt64Histo records the measurement alongside labels on the int
+ // histo associated with the provided handle.
+ RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
+ // RecordFloat64Histo records the measurement alongside labels on the float
+ // histo associated with the provided handle.
+ RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
+ // RecordInt64Gauge records the measurement alongside labels on the int
+ // gauge associated with the provided handle.
+ RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+}
+
+// Metric is an identifier for a metric.
+type Metric string
+
+// Metrics is a set of metrics to record. Once created, Metrics is immutable;
+// however, Add and Remove can make copies with specific metrics added or
+// removed, respectively.
+//
+// Do not construct directly; use NewMetrics instead.
+type Metrics struct {
+ // metrics are the set of metrics to initialize.
+ metrics map[Metric]bool
+}
+
+// NewMetrics returns a Metrics containing the provided metrics.
+func NewMetrics(metrics ...Metric) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ for _, metric := range metrics {
+ newMetrics[metric] = true
+ }
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
+
+// Metrics returns the metrics set. The returned map is read-only and must not
+// be modified.
+func (m *Metrics) Metrics() map[Metric]bool {
+ return m.metrics
+}
+
+// Add adds the metrics to the metrics set and returns a new copy with the
+// additional metrics.
+func (m *Metrics) Add(metrics ...Metric) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ for metric := range m.metrics {
+ newMetrics[metric] = true
+ }
+
+ for _, metric := range metrics {
+ newMetrics[metric] = true
+ }
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
+
+// Join joins the metrics passed in with the metrics set, and returns a new copy
+// with the merged metrics.
+func (m *Metrics) Join(metrics *Metrics) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ maps.Copy(newMetrics, m.metrics)
+ maps.Copy(newMetrics, metrics.metrics)
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
+
+// Remove removes the metrics from the metrics set and returns a new copy with
+// the metrics removed.
+func (m *Metrics) Remove(metrics ...Metric) *Metrics {
+ newMetrics := make(map[Metric]bool)
+ for metric := range m.metrics {
+ newMetrics[metric] = true
+ }
+
+ for _, metric := range metrics {
+ delete(newMetrics, metric)
+ }
+ return &Metrics{
+ metrics: newMetrics,
+ }
+}
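Since every Add, Remove, and Join returns a fresh copy, a Metrics value can be shared safely once built. A short sketch using hypothetical metric names:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/experimental/stats"
    )

    func main() {
        // Hypothetical metric names; each call below returns a fresh copy,
        // leaving the receiver untouched.
        base := stats.NewMetrics("example.com/a", "example.com/b")
        extended := base.Add("example.com/c")
        trimmed := extended.Remove("example.com/b")
        merged := base.Join(stats.NewMetrics("example.com/d"))

        fmt.Println(len(base.Metrics()), len(extended.Metrics()), len(trimmed.Metrics()), len(merged.Metrics()))
        // Output: 2 3 2 3
    }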
diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go
index ac73c9ced..f1ae080dc 100644
--- a/vendor/google.golang.org/grpc/grpclog/component.go
+++ b/vendor/google.golang.org/grpc/grpclog/component.go
@@ -20,8 +20,6 @@ package grpclog
import (
"fmt"
-
- "google.golang.org/grpc/internal/grpclog"
)
// componentData records the settings for a component.
@@ -33,22 +31,22 @@ var cache = map[string]*componentData{}
func (c *componentData) InfoDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.InfoDepth(depth+1, args...)
+ InfoDepth(depth+1, args...)
}
func (c *componentData) WarningDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.WarningDepth(depth+1, args...)
+ WarningDepth(depth+1, args...)
}
func (c *componentData) ErrorDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.ErrorDepth(depth+1, args...)
+ ErrorDepth(depth+1, args...)
}
func (c *componentData) FatalDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
- grpclog.FatalDepth(depth+1, args...)
+ FatalDepth(depth+1, args...)
}
func (c *componentData) Info(args ...any) {
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
index 16928c9cb..db320105e 100644
--- a/vendor/google.golang.org/grpc/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -18,18 +18,15 @@
// Package grpclog defines logging for grpc.
//
-// All logs in transport and grpclb packages only go to verbose level 2.
-// All logs in other packages in grpc are logged in spite of the verbosity level.
-//
-// In the default logger,
-// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
-// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
-package grpclog // import "google.golang.org/grpc/grpclog"
+// In the default logger, severity level can be set by environment variable
+// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by
+// GRPC_GO_LOG_VERBOSITY_LEVEL.
+package grpclog
import (
"os"
- "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/grpclog/internal"
)
func init() {
@@ -38,58 +35,58 @@ func init() {
// V reports whether verbosity level l is at least the requested verbose level.
func V(l int) bool {
- return grpclog.Logger.V(l)
+ return internal.LoggerV2Impl.V(l)
}
// Info logs to the INFO log.
func Info(args ...any) {
- grpclog.Logger.Info(args...)
+ internal.LoggerV2Impl.Info(args...)
}
// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
func Infof(format string, args ...any) {
- grpclog.Logger.Infof(format, args...)
+ internal.LoggerV2Impl.Infof(format, args...)
}
// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
func Infoln(args ...any) {
- grpclog.Logger.Infoln(args...)
+ internal.LoggerV2Impl.Infoln(args...)
}
// Warning logs to the WARNING log.
func Warning(args ...any) {
- grpclog.Logger.Warning(args...)
+ internal.LoggerV2Impl.Warning(args...)
}
// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
func Warningf(format string, args ...any) {
- grpclog.Logger.Warningf(format, args...)
+ internal.LoggerV2Impl.Warningf(format, args...)
}
// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
func Warningln(args ...any) {
- grpclog.Logger.Warningln(args...)
+ internal.LoggerV2Impl.Warningln(args...)
}
// Error logs to the ERROR log.
func Error(args ...any) {
- grpclog.Logger.Error(args...)
+ internal.LoggerV2Impl.Error(args...)
}
// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
func Errorf(format string, args ...any) {
- grpclog.Logger.Errorf(format, args...)
+ internal.LoggerV2Impl.Errorf(format, args...)
}
// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
func Errorln(args ...any) {
- grpclog.Logger.Errorln(args...)
+ internal.LoggerV2Impl.Errorln(args...)
}
// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
// It calls os.Exit() with exit code 1.
func Fatal(args ...any) {
- grpclog.Logger.Fatal(args...)
+ internal.LoggerV2Impl.Fatal(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
@@ -97,15 +94,15 @@ func Fatal(args ...any) {
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
// It calls os.Exit() with exit code 1.
func Fatalf(format string, args ...any) {
- grpclog.Logger.Fatalf(format, args...)
+ internal.LoggerV2Impl.Fatalf(format, args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
-// It calle os.Exit()) with exit code 1.
+// It calls os.Exit() with exit code 1.
func Fatalln(args ...any) {
- grpclog.Logger.Fatalln(args...)
+ internal.LoggerV2Impl.Fatalln(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
@@ -114,19 +111,76 @@ func Fatalln(args ...any) {
//
// Deprecated: use Info.
func Print(args ...any) {
- grpclog.Logger.Info(args...)
+ internal.LoggerV2Impl.Info(args...)
}
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
//
// Deprecated: use Infof.
func Printf(format string, args ...any) {
- grpclog.Logger.Infof(format, args...)
+ internal.LoggerV2Impl.Infof(format, args...)
}
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
//
// Deprecated: use Infoln.
func Println(args ...any) {
- grpclog.Logger.Infoln(args...)
+ internal.LoggerV2Impl.Infoln(args...)
+}
+
+// InfoDepth logs to the INFO log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func InfoDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.InfoDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Infoln(args...)
+ }
+}
+
+// WarningDepth logs to the WARNING log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WarningDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.WarningDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Warningln(args...)
+ }
+}
+
+// ErrorDepth logs to the ERROR log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ErrorDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.ErrorDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Errorln(args...)
+ }
+}
+
+// FatalDepth logs to the FATAL log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func FatalDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.FatalDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Fatalln(args...)
+ }
+ os.Exit(1)
}
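The exported functions above now delegate to grpclog/internal, and the depth variants (InfoDepth, WarningDepth, ErrorDepth, FatalDepth) are exported for component loggers. Applications still swap the backing logger the same way; a hedged sketch using the long-standing SetLoggerV2/NewLoggerV2 API (not introduced by this patch):

    package main

    import (
        "io"
        "os"

        "google.golang.org/grpc/grpclog"
    )

    func main() {
        // Discard INFO, send WARNING and ERROR to stderr. Install the logger
        // before any other grpc-go code starts logging.
        grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, os.Stderr, os.Stderr))

        grpclog.Warning("this goes to stderr")
        grpclog.Info("this is discarded")
    }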
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
new file mode 100644
index 000000000..59c03bc14
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains functionality internal to the grpclog package.
+package internal
+
+// LoggerV2Impl is the logger used for the non-depth log functions.
+var LoggerV2Impl LoggerV2
+
+// DepthLoggerV2Impl is the logger used for the depth log functions.
+var DepthLoggerV2Impl DepthLoggerV2
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
new file mode 100644
index 000000000..e524fdd40
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+ Fatal(args ...any)
+ Fatalf(format string, args ...any)
+ Fatalln(args ...any)
+ Print(args ...any)
+ Printf(format string, args ...any)
+ Println(args ...any)
+}
+
+// LoggerWrapper wraps Logger into a LoggerV2.
+type LoggerWrapper struct {
+ Logger
+}
+
+// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Info(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Infoln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Infof(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Warning(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Warningln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Warningf(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Error(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Errorln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Errorf(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (*LoggerWrapper) V(int) bool {
+ // Returns true for all verbose levels.
+ return true
+}
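
As a hedged aside (not part of the patch): the LoggerWrapper above is what the deprecated grpclog.SetLogger entry point now stores, so a standard-library logger can still be plugged in through the old print-style interface. A minimal sketch, with the prefix and writer chosen only for illustration:

package main

import (
	"log"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// *log.Logger satisfies the deprecated print-style Logger interface;
	// SetLogger wraps it so the rest of gRPC sees a LoggerV2.
	grpclog.SetLogger(log.New(os.Stderr, "grpc: ", log.LstdFlags))
}

func main() {} // placeholder; a real program would create clients/servers here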
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
similarity index 52%
rename from vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
rename to vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
index bfc45102a..07df71e98 100644
--- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2020 gRPC authors.
+ * Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,59 +16,17 @@
*
*/
-// Package grpclog (internal) defines depth logging for grpc.
-package grpclog
+package internal
import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
"os"
)
-// Logger is the logger used for the non-depth log functions.
-var Logger LoggerV2
-
-// DepthLogger is the logger used for the depth log functions.
-var DepthLogger DepthLoggerV2
-
-// InfoDepth logs to the INFO log at the specified depth.
-func InfoDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.InfoDepth(depth, args...)
- } else {
- Logger.Infoln(args...)
- }
-}
-
-// WarningDepth logs to the WARNING log at the specified depth.
-func WarningDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.WarningDepth(depth, args...)
- } else {
- Logger.Warningln(args...)
- }
-}
-
-// ErrorDepth logs to the ERROR log at the specified depth.
-func ErrorDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.ErrorDepth(depth, args...)
- } else {
- Logger.Errorln(args...)
- }
-}
-
-// FatalDepth logs to the FATAL log at the specified depth.
-func FatalDepth(depth int, args ...any) {
- if DepthLogger != nil {
- DepthLogger.FatalDepth(depth, args...)
- } else {
- Logger.Fatalln(args...)
- }
- os.Exit(1)
-}
-
// LoggerV2 does underlying logging work for grpclog.
-// This is a copy of the LoggerV2 defined in the external grpclog package. It
-// is defined here to avoid a circular dependency.
type LoggerV2 interface {
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
Info(args ...any)
@@ -107,14 +65,13 @@ type LoggerV2 interface {
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
-// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
-// It is defined here to avoid a circular dependency.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type DepthLoggerV2 interface {
+ LoggerV2
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
InfoDepth(depth int, args ...any)
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
@@ -124,3 +81,124 @@ type DepthLoggerV2 interface {
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
FatalDepth(depth int, args ...any)
}
+
+const (
+ // infoLog indicates Info severity.
+ infoLog int = iota
+ // warningLog indicates Warning severity.
+ warningLog
+ // errorLog indicates Error severity.
+ errorLog
+ // fatalLog indicates Fatal severity.
+ fatalLog
+)
+
+// severityName contains the string representation of each severity.
+var severityName = []string{
+ infoLog: "INFO",
+ warningLog: "WARNING",
+ errorLog: "ERROR",
+ fatalLog: "FATAL",
+}
+
+// loggerT is the default logger used by grpclog.
+type loggerT struct {
+ m []*log.Logger
+ v int
+ jsonFormat bool
+}
+
+func (g *loggerT) output(severity int, s string) {
+ sevStr := severityName[severity]
+ if !g.jsonFormat {
+ g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
+ return
+ }
+ // TODO: we can also include the logging component, but that needs more
+ // (API) changes.
+ b, _ := json.Marshal(map[string]string{
+ "severity": sevStr,
+ "message": s,
+ })
+ g.m[severity].Output(2, string(b))
+}
+
+func (g *loggerT) Info(args ...any) {
+ g.output(infoLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Infoln(args ...any) {
+ g.output(infoLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Infof(format string, args ...any) {
+ g.output(infoLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Warning(args ...any) {
+ g.output(warningLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Warningln(args ...any) {
+ g.output(warningLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Warningf(format string, args ...any) {
+ g.output(warningLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Error(args ...any) {
+ g.output(errorLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Errorln(args ...any) {
+ g.output(errorLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Errorf(format string, args ...any) {
+ g.output(errorLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Fatal(args ...any) {
+ g.output(fatalLog, fmt.Sprint(args...))
+ os.Exit(1)
+}
+
+func (g *loggerT) Fatalln(args ...any) {
+ g.output(fatalLog, fmt.Sprintln(args...))
+ os.Exit(1)
+}
+
+func (g *loggerT) Fatalf(format string, args ...any) {
+ g.output(fatalLog, fmt.Sprintf(format, args...))
+ os.Exit(1)
+}
+
+func (g *loggerT) V(l int) bool {
+ return l <= g.v
+}
+
+// LoggerV2Config configures the LoggerV2 implementation.
+type LoggerV2Config struct {
+ // Verbosity sets the verbosity level of the logger.
+ Verbosity int
+ // FormatJSON controls whether the logger should output logs in JSON format.
+ FormatJSON bool
+}
+
+// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration.
+// The infoW, warningW, and errorW writers are used to write log messages of
+// different severity levels.
+func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 {
+ var m []*log.Logger
+ flag := log.LstdFlags
+ if c.FormatJSON {
+ flag = 0
+ }
+ m = append(m, log.New(infoW, "", flag))
+ m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
+ ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
+ m = append(m, log.New(ew, "", flag))
+ m = append(m, log.New(ew, "", flag))
+ return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON}
+}
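
A minimal sketch of the new constructor, assuming code that lives inside the grpc module (the package is internal and not importable elsewhere). It also illustrates the writer fan-out above: warnings are mirrored to infoW, and errors/fatals go to all three writers:

package example // hypothetical package inside the grpc module

import (
	"os"

	"google.golang.org/grpc/grpclog/internal"
)

func newJSONLogger() internal.LoggerV2 {
	// Verbosity 2 with JSON output; the writer choice is illustrative only.
	return internal.NewLoggerV2(os.Stdout, os.Stdout, os.Stderr, internal.LoggerV2Config{
		Verbosity:  2,
		FormatJSON: true,
	})
}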
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
index b1674d826..4b2035857 100644
--- a/vendor/google.golang.org/grpc/grpclog/logger.go
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -18,70 +18,17 @@
package grpclog
-import "google.golang.org/grpc/internal/grpclog"
+import "google.golang.org/grpc/grpclog/internal"
// Logger mimics golang's standard Logger as an interface.
//
// Deprecated: use LoggerV2.
-type Logger interface {
- Fatal(args ...any)
- Fatalf(format string, args ...any)
- Fatalln(args ...any)
- Print(args ...any)
- Printf(format string, args ...any)
- Println(args ...any)
-}
+type Logger internal.Logger
// SetLogger sets the logger that is used in grpc. Call only from
// init() functions.
//
// Deprecated: use SetLoggerV2.
func SetLogger(l Logger) {
- grpclog.Logger = &loggerWrapper{Logger: l}
-}
-
-// loggerWrapper wraps Logger into a LoggerV2.
-type loggerWrapper struct {
- Logger
-}
-
-func (g *loggerWrapper) Info(args ...any) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Infoln(args ...any) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Infof(format string, args ...any) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) Warning(args ...any) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Warningln(args ...any) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Warningf(format string, args ...any) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) Error(args ...any) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Errorln(args ...any) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Errorf(format string, args ...any) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) V(l int) bool {
- // Returns true for all verbose level.
- return true
+ internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l}
}
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
index ecfd36d71..892dc13d1 100644
--- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go
+++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -19,52 +19,16 @@
package grpclog
import (
- "encoding/json"
- "fmt"
"io"
- "log"
"os"
"strconv"
"strings"
- "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/grpclog/internal"
)
// LoggerV2 does underlying logging work for grpclog.
-type LoggerV2 interface {
- // Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
- Info(args ...any)
- // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
- Infoln(args ...any)
- // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
- Infof(format string, args ...any)
- // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
- Warning(args ...any)
- // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
- Warningln(args ...any)
- // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
- Warningf(format string, args ...any)
- // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- Error(args ...any)
- // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- Errorln(args ...any)
- // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- Errorf(format string, args ...any)
- // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatal(args ...any)
- // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalln(args ...any)
- // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalf(format string, args ...any)
- // V reports whether verbosity level l is at least the requested verbose level.
- V(l int) bool
-}
+type LoggerV2 internal.LoggerV2
// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
// Not mutex-protected, should be called before any gRPC functions.
@@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) {
if _, ok := l.(*componentData); ok {
panic("cannot use component logger as grpclog logger")
}
- grpclog.Logger = l
- grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
-}
-
-const (
- // infoLog indicates Info severity.
- infoLog int = iota
- // warningLog indicates Warning severity.
- warningLog
- // errorLog indicates Error severity.
- errorLog
- // fatalLog indicates Fatal severity.
- fatalLog
-)
-
-// severityName contains the string representation of each severity.
-var severityName = []string{
- infoLog: "INFO",
- warningLog: "WARNING",
- errorLog: "ERROR",
- fatalLog: "FATAL",
-}
-
-// loggerT is the default logger used by grpclog.
-type loggerT struct {
- m []*log.Logger
- v int
- jsonFormat bool
+ internal.LoggerV2Impl = l
+ internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2)
}
// NewLoggerV2 creates a loggerV2 with the provided writers.
@@ -108,32 +46,13 @@ type loggerT struct {
// Warning logs will be written to warningW and infoW.
// Info logs will be written to infoW.
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
- return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{})
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{})
}
// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
// verbosity level.
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
- return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v})
-}
-
-type loggerV2Config struct {
- verbose int
- jsonFormat bool
-}
-
-func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 {
- var m []*log.Logger
- flag := log.LstdFlags
- if c.jsonFormat {
- flag = 0
- }
- m = append(m, log.New(infoW, "", flag))
- m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
- ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
- m = append(m, log.New(ew, "", flag))
- m = append(m, log.New(ew, "", flag))
- return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat}
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v})
}
// newLoggerV2 creates a loggerV2 to be used as default logger.
@@ -161,80 +80,10 @@ func newLoggerV2() LoggerV2 {
jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
- return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{
- verbose: v,
- jsonFormat: jsonFormat,
- })
-}
-
-func (g *loggerT) output(severity int, s string) {
- sevStr := severityName[severity]
- if !g.jsonFormat {
- g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
- return
- }
- // TODO: we can also include the logging component, but that needs more
- // (API) changes.
- b, _ := json.Marshal(map[string]string{
- "severity": sevStr,
- "message": s,
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{
+ Verbosity: v,
+ FormatJSON: jsonFormat,
})
- g.m[severity].Output(2, string(b))
-}
-
-func (g *loggerT) Info(args ...any) {
- g.output(infoLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Infoln(args ...any) {
- g.output(infoLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Infof(format string, args ...any) {
- g.output(infoLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Warning(args ...any) {
- g.output(warningLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Warningln(args ...any) {
- g.output(warningLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Warningf(format string, args ...any) {
- g.output(warningLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Error(args ...any) {
- g.output(errorLog, fmt.Sprint(args...))
-}
-
-func (g *loggerT) Errorln(args ...any) {
- g.output(errorLog, fmt.Sprintln(args...))
-}
-
-func (g *loggerT) Errorf(format string, args ...any) {
- g.output(errorLog, fmt.Sprintf(format, args...))
-}
-
-func (g *loggerT) Fatal(args ...any) {
- g.output(fatalLog, fmt.Sprint(args...))
- os.Exit(1)
-}
-
-func (g *loggerT) Fatalln(args ...any) {
- g.output(fatalLog, fmt.Sprintln(args...))
- os.Exit(1)
-}
-
-func (g *loggerT) Fatalf(format string, args ...any) {
- g.output(fatalLog, fmt.Sprintf(format, args...))
- os.Exit(1)
-}
-
-func (g *loggerT) V(l int) bool {
- return l <= g.v
}
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
@@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool {
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
-type DepthLoggerV2 interface {
- LoggerV2
- // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
- InfoDepth(depth int, args ...any)
- // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
- WarningDepth(depth int, args ...any)
- // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
- ErrorDepth(depth int, args ...any)
- // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
- FatalDepth(depth int, args ...any)
-}
+type DepthLoggerV2 internal.DepthLoggerV2
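
The public surface is unchanged by this refactor; a hedged sketch of the usual installation path, with io.Discard and os.Stderr chosen only for illustration:

package main

import (
	"io"
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Drop INFO, keep WARNING/ERROR on stderr, verbosity 1.
	// Not mutex-protected, so install before any other gRPC call.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(io.Discard, os.Stderr, os.Stderr, 1))
}

func main() {}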
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index 24299efd6..d92335445 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -17,8 +17,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.31.0
-// protoc v4.22.0
+// protoc-gen-go v1.34.2
+// protoc v5.27.1
// source: grpc/health/v1/health.proto
package grpc_health_v1
@@ -237,7 +237,7 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte {
var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_grpc_health_v1_health_proto_goTypes = []interface{}{
+var file_grpc_health_v1_health_proto_goTypes = []any{
(HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus
(*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest
(*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse
@@ -261,7 +261,7 @@ func file_grpc_health_v1_health_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*HealthCheckRequest); i {
case 0:
return &v.state
@@ -273,7 +273,7 @@ func file_grpc_health_v1_health_proto_init() {
return nil
}
}
- file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*HealthCheckResponse); i {
case 0:
return &v.state
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
index a01a1b4d5..f96b8ab49 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
@@ -17,8 +17,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.3.0
-// - protoc v4.22.0
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.27.1
// source: grpc/health/v1/health.proto
package grpc_health_v1
@@ -32,8 +32,8 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
const (
Health_Check_FullMethodName = "/grpc.health.v1.Health/Check"
@@ -43,9 +43,20 @@ const (
// HealthClient is the client API for Health service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+//
+// Health is gRPC's mechanism for checking whether a server is able to handle
+// RPCs. Its semantics are documented in
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
type HealthClient interface {
- // If the requested service is unknown, the call will fail with status
- // NOT_FOUND.
+ // Check gets the health of the specified service. If the requested service
+ // is unknown, the call will fail with status NOT_FOUND. If the caller does
+ // not specify a service name, the server should respond with its overall
+ // health status.
+ //
+ // Clients should set a deadline when calling Check, and can declare the
+ // server unhealthy if they do not receive a timely response.
+ //
+ // Check implementations should be idempotent and side effect free.
Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
// Performs a watch for the serving status of the requested service.
// The server will immediately send back a message indicating the current
@@ -62,7 +73,7 @@ type HealthClient interface {
// should assume this method is not supported and should not retry the
// call. If the call terminates with any other status (including OK),
// clients should retry the call with appropriate exponential backoff.
- Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error)
+ Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error)
}
type healthClient struct {
@@ -74,20 +85,22 @@ func NewHealthClient(cc grpc.ClientConnInterface) HealthClient {
}
func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(HealthCheckResponse)
- err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, opts...)
+ err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
- stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, opts...)
+func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
- x := &healthWatchClient{stream}
+ x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@@ -97,29 +110,26 @@ func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts .
return x, nil
}
-type Health_WatchClient interface {
- Recv() (*HealthCheckResponse, error)
- grpc.ClientStream
-}
-
-type healthWatchClient struct {
- grpc.ClientStream
-}
-
-func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
- m := new(HealthCheckResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse]
// HealthServer is the server API for Health service.
// All implementations should embed UnimplementedHealthServer
-// for forward compatibility
+// for forward compatibility.
+//
+// Health is gRPC's mechanism for checking whether a server is able to handle
+// RPCs. Its semantics are documented in
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
type HealthServer interface {
- // If the requested service is unknown, the call will fail with status
- // NOT_FOUND.
+ // Check gets the health of the specified service. If the requested service
+ // is unknown, the call will fail with status NOT_FOUND. If the caller does
+ // not specify a service name, the server should respond with its overall
+ // health status.
+ //
+ // Clients should set a deadline when calling Check, and can declare the
+ // server unhealthy if they do not receive a timely response.
+ //
+ // Check implementations should be idempotent and side effect free.
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
// Performs a watch for the serving status of the requested service.
// The server will immediately send back a message indicating the current
@@ -136,19 +146,23 @@ type HealthServer interface {
// should assume this method is not supported and should not retry the
// call. If the call terminates with any other status (including OK),
// clients should retry the call with appropriate exponential backoff.
- Watch(*HealthCheckRequest, Health_WatchServer) error
+ Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error
}
-// UnimplementedHealthServer should be embedded to have forward compatible implementations.
-type UnimplementedHealthServer struct {
-}
+// UnimplementedHealthServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedHealthServer struct{}
func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
}
-func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error {
+func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error {
return status.Errorf(codes.Unimplemented, "method Watch not implemented")
}
+func (UnimplementedHealthServer) testEmbeddedByValue() {}
// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to HealthServer will
@@ -158,6 +172,13 @@ type UnsafeHealthServer interface {
}
func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) {
+ // If the following call panics, it indicates UnimplementedHealthServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
s.RegisterService(&Health_ServiceDesc, srv)
}
@@ -184,21 +205,11 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
if err := stream.RecvMsg(m); err != nil {
return err
}
- return srv.(HealthServer).Watch(m, &healthWatchServer{stream})
+ return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream})
}
-type Health_WatchServer interface {
- Send(*HealthCheckResponse) error
- grpc.ServerStream
-}
-
-type healthWatchServer struct {
- grpc.ServerStream
-}
-
-func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
- return x.ServerStream.SendMsg(m)
-}
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse]
// Health_ServiceDesc is the grpc.ServiceDesc for Health service.
// It's only intended for direct use with grpc.RegisterService,
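
A hedged sketch of a server implementation against the regenerated stubs: UnimplementedHealthServer is embedded by value (which is what the new panic check in RegisterHealthServer verifies) and Watch is written against the generic ServerStreamingServer type. The always-SERVING behavior is illustrative only:

package example

import (
	"context"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

type healthSrv struct {
	healthpb.UnimplementedHealthServer // embed by value, not by pointer
}

func (healthSrv) Check(ctx context.Context, req *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
	return &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil
}

func (healthSrv) Watch(req *healthpb.HealthCheckRequest, stream grpc.ServerStreamingServer[healthpb.HealthCheckResponse]) error {
	// Send the current status once; a real implementation would keep streaming updates.
	return stream.Send(&healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING})
}

func register(s *grpc.Server) {
	healthpb.RegisterHealthServer(s, healthSrv{})
}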
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
index 5fc0ee3da..b15cf482d 100644
--- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -23,10 +23,12 @@
package backoff
import (
+ "context"
+ "errors"
+ "math/rand"
"time"
grpcbackoff "google.golang.org/grpc/backoff"
- "google.golang.org/grpc/internal/grpcrand"
)
// Strategy defines the methodology for backing off after a grpc connection
@@ -65,9 +67,43 @@ func (bc Exponential) Backoff(retries int) time.Duration {
}
// Randomize backoff delays so that if a cluster of requests start at
// the same time, they won't operate in lockstep.
- backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
+ backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1)
if backoff < 0 {
return 0
}
return time.Duration(backoff)
}
+
+// ErrResetBackoff is the error to be returned by the function executed by RunF,
+// to instruct the latter to reset its backoff state.
+var ErrResetBackoff = errors.New("reset backoff state")
+
+// RunF provides a convenient way to run a function f repeatedly until the
+// context expires or f returns a non-nil error that is not ErrResetBackoff.
+// When f returns ErrResetBackoff, RunF continues to run f, but resets its
+// backoff state before doing so. backoff accepts an integer representing the
+// number of retries, and returns the amount of time to backoff.
+func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) {
+ attempt := 0
+ timer := time.NewTimer(0)
+ for ctx.Err() == nil {
+ select {
+ case <-timer.C:
+ case <-ctx.Done():
+ timer.Stop()
+ return
+ }
+
+ err := f()
+ if errors.Is(err, ErrResetBackoff) {
+ timer.Reset(0)
+ attempt = 0
+ continue
+ }
+ if err != nil {
+ return
+ }
+ timer.Reset(backoff(attempt))
+ attempt++
+ }
+}
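
A minimal sketch of the RunF contract, assuming code inside the grpc module (the package is internal) and a hypothetical runStreamOnce helper: f returning nil schedules another run after the next backoff interval, ErrResetBackoff resets the schedule and runs again immediately, and any other error ends the loop:

package example // hypothetical package inside the grpc module

import (
	"context"

	grpcbackoff "google.golang.org/grpc/backoff"
	"google.golang.org/grpc/internal/backoff"
)

func runWithBackoff(ctx context.Context, runStreamOnce func(context.Context) (bool, error)) {
	strategy := backoff.Exponential{Config: grpcbackoff.DefaultConfig}
	backoff.RunF(ctx, func() error {
		healthy, err := runStreamOnce(ctx)
		if err != nil {
			return err // any error other than ErrResetBackoff stops RunF
		}
		if healthy {
			return backoff.ErrResetBackoff // reset the delays and run again right away
		}
		return nil // run again after the next exponential delay
	}, strategy.Backoff)
}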
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
new file mode 100644
index 000000000..85540f86a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
@@ -0,0 +1,84 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package gracefulswitch
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+type lbConfig struct {
+ serviceconfig.LoadBalancingConfig
+
+ childBuilder balancer.Builder
+ childConfig serviceconfig.LoadBalancingConfig
+}
+
+// ChildName returns the name of the child balancer of the gracefulswitch
+// Balancer.
+func ChildName(l serviceconfig.LoadBalancingConfig) string {
+ return l.(*lbConfig).childBuilder.Name()
+}
+
+// ParseConfig parses a child config list and returns a LB config for the
+// gracefulswitch Balancer.
+//
+// cfg is expected to be a json.RawMessage containing a JSON array of LB policy
+// names + configs as the format of the "loadBalancingConfig" field in
+// ServiceConfig. It returns a type that should be passed to
+// UpdateClientConnState in the BalancerConfig field.
+func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+ var lbCfg []map[string]json.RawMessage
+ if err := json.Unmarshal(cfg, &lbCfg); err != nil {
+ return nil, err
+ }
+ for i, e := range lbCfg {
+ if len(e) != 1 {
+ return nil, fmt.Errorf("expected a JSON struct with one entry; received entry %v at index %d", e, i)
+ }
+
+ var name string
+ var jsonCfg json.RawMessage
+ for name, jsonCfg = range e {
+ }
+
+ builder := balancer.Get(name)
+ if builder == nil {
+ // Skip unregistered balancer names.
+ continue
+ }
+
+ parser, ok := builder.(balancer.ConfigParser)
+ if !ok {
+ // This is a valid child with no config.
+ return &lbConfig{childBuilder: builder}, nil
+ }
+
+ cfg, err := parser.ParseConfig(jsonCfg)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err)
+ }
+ return &lbConfig{childBuilder: builder, childConfig: cfg}, nil
+ }
+
+ return nil, fmt.Errorf("no supported policies found in config: %v", string(cfg))
+}
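
A hedged sketch of ParseConfig in use, assuming code inside the grpc module and that the named policy has already been registered (for example by importing its balancer package); the JSON literal mirrors the service-config "loadBalancingConfig" field, and the first entry with a registered policy name wins:

package example // hypothetical package inside the grpc module

import (
	"encoding/json"

	"google.golang.org/grpc/internal/balancer/gracefulswitch"
	"google.golang.org/grpc/serviceconfig"
)

func parseLBConfig() (serviceconfig.LoadBalancingConfig, error) {
	// "unknown_policy" is skipped because no builder is registered for it.
	raw := json.RawMessage(`[{"unknown_policy":{}},{"round_robin":{}}]`)
	return gracefulswitch.ParseConfig(raw)
}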
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
index 3c594e6e4..73bb4c4ee 100644
--- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
@@ -94,14 +94,23 @@ func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
// process is not complete when this method returns. This method must be called
// synchronously alongside the rest of the balancer.Balancer methods this
// Graceful Switch Balancer implements.
+//
+// Deprecated: use ParseConfig and pass a parsed config to UpdateClientConnState
+// to cause the Balancer to automatically change to the new child when necessary.
func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
+ _, err := gsb.switchTo(builder)
+ return err
+}
+
+func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error) {
gsb.mu.Lock()
if gsb.closed {
gsb.mu.Unlock()
- return errBalancerClosed
+ return nil, errBalancerClosed
}
bw := &balancerWrapper{
- gsb: gsb,
+ builder: builder,
+ gsb: gsb,
lastState: balancer.State{
ConnectivityState: connectivity.Connecting,
Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable),
@@ -129,7 +138,7 @@ func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
gsb.balancerCurrent = nil
}
gsb.mu.Unlock()
- return balancer.ErrBadResolverState
+ return nil, balancer.ErrBadResolverState
}
// This write doesn't need to take gsb.mu because this field never gets read
@@ -138,7 +147,7 @@ func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
// bw.Balancer field will never be forwarded to until this SwitchTo()
// function returns.
bw.Balancer = newBalancer
- return nil
+ return bw, nil
}
// Returns nil if the graceful switch balancer is closed.
@@ -152,12 +161,32 @@ func (gsb *Balancer) latestBalancer() *balancerWrapper {
}
// UpdateClientConnState forwards the update to the latest balancer created.
+//
+// If the state's BalancerConfig is the config returned by a call to
+// gracefulswitch.ParseConfig, then this function will automatically SwitchTo
+// the balancer indicated by the config before forwarding its config to it, if
+// necessary.
func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
// The resolver data is only relevant to the most recent LB Policy.
balToUpdate := gsb.latestBalancer()
+ gsbCfg, ok := state.BalancerConfig.(*lbConfig)
+ if ok {
+ // Switch to the child in the config unless it is already active.
+ if balToUpdate == nil || gsbCfg.childBuilder.Name() != balToUpdate.builder.Name() {
+ var err error
+ balToUpdate, err = gsb.switchTo(gsbCfg.childBuilder)
+ if err != nil {
+ return fmt.Errorf("could not switch to new child balancer: %w", err)
+ }
+ }
+ // Unwrap the child balancer's config.
+ state.BalancerConfig = gsbCfg.childConfig
+ }
+
if balToUpdate == nil {
return errBalancerClosed
}
+
// Perform this call without gsb.mu to prevent deadlocks if the child calls
// back into the channel. The latest balancer can never be closed during a
// call from the channel, even without gsb.mu held.
@@ -169,6 +198,10 @@ func (gsb *Balancer) ResolverError(err error) {
// The resolver data is only relevant to the most recent LB Policy.
balToUpdate := gsb.latestBalancer()
if balToUpdate == nil {
+ gsb.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: base.NewErrPicker(err),
+ })
return
}
// Perform this call without gsb.mu to prevent deadlocks if the child calls
@@ -261,7 +294,8 @@ func (gsb *Balancer) Close() {
// graceful switch logic.
type balancerWrapper struct {
balancer.Balancer
- gsb *Balancer
+ gsb *Balancer
+ builder balancer.Builder
lastState balancer.State
subconns map[balancer.SubConn]bool // subconns created by this balancer
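
A minimal sketch of the intended replacement for explicit SwitchTo calls, assuming a hypothetical wrapping balancer inside the grpc module: parse the config once, place it in BalancerConfig, and let UpdateClientConnState switch children when the policy name changes:

package example // hypothetical package inside the grpc module

import (
	"encoding/json"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/internal/balancer/gracefulswitch"
)

// applyConfig forwards a parsed gracefulswitch config; the Balancer switches
// to the child named in the config before delivering the unwrapped child config.
func applyConfig(gsb *gracefulswitch.Balancer, ccs balancer.ClientConnState, rawLBCfg json.RawMessage) error {
	cfg, err := gracefulswitch.ParseConfig(rawLBCfg)
	if err != nil {
		return err
	}
	ccs.BalancerConfig = cfg
	return gsb.UpdateClientConnState(ccs)
}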
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
index 0f31274a3..966932891 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -25,11 +25,12 @@ import (
"sync/atomic"
"time"
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes"
binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/durationpb"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
type callIDGenerator struct {
@@ -64,7 +65,7 @@ type TruncatingMethodLogger struct {
callID uint64
idWithinCallGen *callIDGenerator
- sink Sink // TODO(blog): make this plugable.
+ sink Sink // TODO(blog): make this pluggable.
}
// NewTruncatingMethodLogger returns a new truncating method logger.
@@ -79,7 +80,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
callID: idGen.next(),
idWithinCallGen: &callIDGenerator{},
- sink: DefaultSink, // TODO(blog): make it plugable.
+ sink: DefaultSink, // TODO(blog): make it pluggable.
}
}
@@ -88,7 +89,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
// in TruncatingMethodLogger as possible.
func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
m := c.toProto()
- timestamp, _ := ptypes.TimestampProto(time.Now())
+ timestamp := timestamppb.Now()
m.Timestamp = timestamp
m.CallId = ml.callID
m.SequenceIdWithinCall = ml.idWithinCallGen.next()
@@ -105,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry
}
// Log creates a proto binary log entry, and logs it to the sink.
-func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) {
+func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) {
ml.sink.Write(ml.Build(c))
}
@@ -178,7 +179,7 @@ func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
Authority: c.Authority,
}
if c.Timeout > 0 {
- clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
+ clientHeader.Timeout = durationpb.New(c.Timeout)
}
ret := &binlogpb.GrpcLogEntry{
Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
@@ -396,7 +397,7 @@ func metadataKeyOmit(key string) bool {
switch key {
case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
return true
- case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users.
+ case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
return false
}
return strings.HasPrefix(key, "grpc-")
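
A hedged sketch of the well-known-type migration applied in this file: the github.com/golang/protobuf/ptypes helpers are replaced one-for-one by the google.golang.org/protobuf packages, and the timestamp conversion no longer returns an error:

package example

import (
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func stamps(timeout time.Duration) (*timestamppb.Timestamp, *durationpb.Duration) {
	// Previously: ptypes.TimestampProto(time.Now()) and ptypes.DurationProto(timeout).
	return timestamppb.Now(), durationpb.New(timeout)
}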
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
index 264de387c..9ea598b14 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -25,8 +25,8 @@ import (
"sync"
"time"
- "github.com/golang/protobuf/proto"
binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+ "google.golang.org/protobuf/proto"
)
var (
diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
index 4399c3df4..11f91668a 100644
--- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
+++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
@@ -18,7 +18,10 @@
// Package buffer provides an implementation of an unbounded buffer.
package buffer
-import "sync"
+import (
+ "errors"
+ "sync"
+)
// Unbounded is an implementation of an unbounded buffer which does not use
// extra goroutines. This is typically used for passing updates from one entity
@@ -36,6 +39,7 @@ import "sync"
type Unbounded struct {
c chan any
closed bool
+ closing bool
mu sync.Mutex
backlog []any
}
@@ -45,32 +49,32 @@ func NewUnbounded() *Unbounded {
return &Unbounded{c: make(chan any, 1)}
}
+var errBufferClosed = errors.New("Put called on closed buffer.Unbounded")
+
// Put adds t to the unbounded buffer.
-func (b *Unbounded) Put(t any) {
+func (b *Unbounded) Put(t any) error {
b.mu.Lock()
defer b.mu.Unlock()
- if b.closed {
- return
+ if b.closing {
+ return errBufferClosed
}
if len(b.backlog) == 0 {
select {
case b.c <- t:
- return
+ return nil
default:
}
}
b.backlog = append(b.backlog, t)
+ return nil
}
-// Load sends the earliest buffered data, if any, onto the read channel
-// returned by Get(). Users are expected to call this every time they read a
+// Load sends the earliest buffered data, if any, onto the read channel returned
+// by Get(). Users are expected to call this every time they successfully read a
// value from the read channel.
func (b *Unbounded) Load() {
b.mu.Lock()
defer b.mu.Unlock()
- if b.closed {
- return
- }
if len(b.backlog) > 0 {
select {
case b.c <- b.backlog[0]:
@@ -78,6 +82,8 @@ func (b *Unbounded) Load() {
b.backlog = b.backlog[1:]
default:
}
+ } else if b.closing && !b.closed {
+ close(b.c)
}
}
@@ -88,18 +94,23 @@ func (b *Unbounded) Load() {
// send the next buffered value onto the channel if there is any.
//
// If the unbounded buffer is closed, the read channel returned by this method
-// is closed.
+// is closed after all data is drained.
func (b *Unbounded) Get() <-chan any {
return b.c
}
-// Close closes the unbounded buffer.
+// Close closes the unbounded buffer. No subsequent data may be Put(), and the
+// channel returned from Get() will be closed after all the data is read and
+// Load() is called for the final time.
func (b *Unbounded) Close() {
b.mu.Lock()
defer b.mu.Unlock()
- if b.closed {
+ if b.closing {
return
}
- b.closed = true
- close(b.c)
+ b.closing = true
+ if len(b.backlog) == 0 {
+ b.closed = true
+ close(b.c)
+ }
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go
new file mode 100644
index 000000000..3ec662799
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go
@@ -0,0 +1,270 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "google.golang.org/grpc/connectivity"
+)
+
+// Channel represents a channel within channelz, which includes metrics and
+// internal channelz data, such as channelz id, child list, etc.
+type Channel struct {
+ Entity
+ // ID is the channelz id of this channel.
+ ID int64
+ // RefName is the human readable reference string of this channel.
+ RefName string
+
+ closeCalled bool
+ nestedChans map[int64]string
+ subChans map[int64]string
+ Parent *Channel
+ trace *ChannelTrace
+ // traceRefCount is the number of trace events that reference this channel.
+ // Non-zero traceRefCount means the trace of this channel cannot be deleted.
+ traceRefCount int32
+
+ // ChannelMetrics holds connectivity state, target and call metrics for the
+ // channel within channelz.
+ ChannelMetrics ChannelMetrics
+}
+
+// Implemented to make Channel implement the Identifier interface used for
+// nesting.
+func (c *Channel) channelzIdentifier() {}
+
+// String returns a string representation of the Channel, including its parent
+// entity and ID.
+func (c *Channel) String() string {
+ if c.Parent == nil {
+ return fmt.Sprintf("Channel #%d", c.ID)
+ }
+ return fmt.Sprintf("%s Channel #%d", c.Parent, c.ID)
+}
+
+func (c *Channel) id() int64 {
+ return c.ID
+}
+
+// SubChans returns a copy of the map of sub-channels associated with the
+// Channel.
+func (c *Channel) SubChans() map[int64]string {
+ db.mu.RLock()
+ defer db.mu.RUnlock()
+ return copyMap(c.subChans)
+}
+
+// NestedChans returns a copy of the map of nested channels associated with the
+// Channel.
+func (c *Channel) NestedChans() map[int64]string {
+ db.mu.RLock()
+ defer db.mu.RUnlock()
+ return copyMap(c.nestedChans)
+}
+
+// Trace returns a copy of the Channel's trace data.
+func (c *Channel) Trace() *ChannelTrace {
+ db.mu.RLock()
+ defer db.mu.RUnlock()
+ return c.trace.copy()
+}
+
+// ChannelMetrics holds connectivity state, target and call metrics for the
+// channel within channelz.
+type ChannelMetrics struct {
+ // The current connectivity state of the channel.
+ State atomic.Pointer[connectivity.State]
+ // The target this channel originally tried to connect to. May be absent.
+ Target atomic.Pointer[string]
+ // The number of calls started on the channel.
+ CallsStarted atomic.Int64
+ // The number of calls that have completed with an OK status.
+ CallsSucceeded atomic.Int64
+ // The number of calls that have completed with a non-OK status.
+ CallsFailed atomic.Int64
+ // The last time a call was started on the channel.
+ LastCallStartedTimestamp atomic.Int64
+}
+
+// CopyFrom copies the metrics in o to c. For testing only.
+func (c *ChannelMetrics) CopyFrom(o *ChannelMetrics) {
+ c.State.Store(o.State.Load())
+ c.Target.Store(o.Target.Load())
+ c.CallsStarted.Store(o.CallsStarted.Load())
+ c.CallsSucceeded.Store(o.CallsSucceeded.Load())
+ c.CallsFailed.Store(o.CallsFailed.Load())
+ c.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
+}
+
+// Equal returns true iff the metrics of c are the same as the metrics of o.
+// For testing only.
+func (c *ChannelMetrics) Equal(o any) bool {
+ oc, ok := o.(*ChannelMetrics)
+ if !ok {
+ return false
+ }
+ if (c.State.Load() == nil) != (oc.State.Load() == nil) {
+ return false
+ }
+ if c.State.Load() != nil && *c.State.Load() != *oc.State.Load() {
+ return false
+ }
+ if (c.Target.Load() == nil) != (oc.Target.Load() == nil) {
+ return false
+ }
+ if c.Target.Load() != nil && *c.Target.Load() != *oc.Target.Load() {
+ return false
+ }
+ return c.CallsStarted.Load() == oc.CallsStarted.Load() &&
+ c.CallsFailed.Load() == oc.CallsFailed.Load() &&
+ c.CallsSucceeded.Load() == oc.CallsSucceeded.Load() &&
+ c.LastCallStartedTimestamp.Load() == oc.LastCallStartedTimestamp.Load()
+}
+
+func strFromPointer(s *string) string {
+ if s == nil {
+ return ""
+ }
+ return *s
+}
+
+// String returns a string representation of the ChannelMetrics, including its
+// state, target, and call metrics.
+func (c *ChannelMetrics) String() string {
+ return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v",
+ c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(),
+ )
+}
+
+// NewChannelMetricForTesting creates a new instance of ChannelMetrics with
+// specified initial values for testing purposes.
+func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics {
+ c := &ChannelMetrics{}
+ c.State.Store(&state)
+ c.Target.Store(&target)
+ c.CallsStarted.Store(started)
+ c.CallsSucceeded.Store(succeeded)
+ c.CallsFailed.Store(failed)
+ c.LastCallStartedTimestamp.Store(timestamp)
+ return c
+}
+
+func (c *Channel) addChild(id int64, e entry) {
+ switch v := e.(type) {
+ case *SubChannel:
+ c.subChans[id] = v.RefName
+ case *Channel:
+ c.nestedChans[id] = v.RefName
+ default:
+ logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
+ }
+}
+
+func (c *Channel) deleteChild(id int64) {
+ delete(c.subChans, id)
+ delete(c.nestedChans, id)
+ c.deleteSelfIfReady()
+}
+
+func (c *Channel) triggerDelete() {
+ c.closeCalled = true
+ c.deleteSelfIfReady()
+}
+
+func (c *Channel) getParentID() int64 {
+ if c.Parent == nil {
+ return -1
+ }
+ return c.Parent.ID
+}
+
+// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
+// deleting the channel reference from its parent's child list.
+//
+// In order for a channel to be deleted from the tree, it must meet the criteria that removal of the
+// corresponding grpc object has been invoked, and the channel does not have any children left.
+//
+// The returned boolean value indicates whether the channel has been successfully deleted from tree.
+func (c *Channel) deleteSelfFromTree() (deleted bool) {
+ if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
+ return false
+ }
+ // not top channel
+ if c.Parent != nil {
+ c.Parent.deleteChild(c.ID)
+ }
+ return true
+}
+
+// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
+// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
+// channel, and its memory will be garbage collected.
+//
+// The trace reference count of the channel must be 0 in order to be deleted from the map. This is
+// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
+// the trace of the referenced entity must not be deleted. In order to release the resource allocated
+// by grpc, the reference to the grpc object is reset to a dummy object.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool to indicate whether the channel can be safely deleted from map.
+func (c *Channel) deleteSelfFromMap() (delete bool) {
+ return c.getTraceRefCount() == 0
+}
+
+// deleteSelfIfReady tries to delete the channel itself from the channelz database.
+// The delete process includes two steps:
+// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
+// parent's child list.
+// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
+// will return entry not found error.
+func (c *Channel) deleteSelfIfReady() {
+ if !c.deleteSelfFromTree() {
+ return
+ }
+ if !c.deleteSelfFromMap() {
+ return
+ }
+ db.deleteEntry(c.ID)
+ c.trace.clear()
+}
+
+func (c *Channel) getChannelTrace() *ChannelTrace {
+ return c.trace
+}
+
+func (c *Channel) incrTraceRefCount() {
+ atomic.AddInt32(&c.traceRefCount, 1)
+}
+
+func (c *Channel) decrTraceRefCount() {
+ atomic.AddInt32(&c.traceRefCount, -1)
+}
+
+func (c *Channel) getTraceRefCount() int {
+ i := atomic.LoadInt32(&c.traceRefCount)
+ return int(i)
+}
+
+func (c *Channel) getRefName() string {
+ return c.RefName
+}
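
A hedged sketch of reading the atomic ChannelMetrics fields, assuming code inside the grpc module; the testing constructor above is the simplest way to build a populated value, and the target string is illustrative:

package example // hypothetical package inside the grpc module

import (
	"fmt"

	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/internal/channelz"
)

func describe() string {
	m := channelz.NewChannelMetricForTesting(connectivity.Ready, "dns:///example:443", 10, 9, 1, 0)
	// Pointer-typed fields (State, Target) may be nil until a value is stored.
	if s := m.State.Load(); s != nil {
		return fmt.Sprintf("state=%v started=%d failed=%d", *s, m.CallsStarted.Load(), m.CallsFailed.Load())
	}
	return m.String()
}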
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
new file mode 100644
index 000000000..64c791953
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
@@ -0,0 +1,395 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+ "fmt"
+ "sort"
+ "sync"
+ "time"
+)
+
+// entry represents a node in the channelz database.
+type entry interface {
+ // addChild adds a child e, whose channelz id is id to child list
+ addChild(id int64, e entry)
+ // deleteChild deletes a child with channelz id to be id from child list
+ deleteChild(id int64)
+ // triggerDelete tries to delete self from channelz database. However, if
+ // child list is not empty, then deletion from the database is on hold until
+ // the last child is deleted from database.
+ triggerDelete()
+ // deleteSelfIfReady check whether triggerDelete() has been called before,
+ // and whether child list is now empty. If both conditions are met, then
+ // delete self from database.
+ deleteSelfIfReady()
+ // getParentID returns parent ID of the entry. 0 value parent ID means no parent.
+ getParentID() int64
+ Entity
+}
+
+// channelMap is the storage data structure for channelz.
+//
+// Methods of channelMap can be divided into two categories with respect to
+// locking.
+//
+// 1. Methods that acquire the global lock themselves.
+// 2. Methods that can only be called while the global lock is held.
+//
+// Methods of the second type must always be called from within a method of the first type.
+type channelMap struct {
+ mu sync.RWMutex
+ topLevelChannels map[int64]struct{}
+ channels map[int64]*Channel
+ subChannels map[int64]*SubChannel
+ sockets map[int64]*Socket
+ servers map[int64]*Server
+}
+
+func newChannelMap() *channelMap {
+ return &channelMap{
+ topLevelChannels: make(map[int64]struct{}),
+ channels: make(map[int64]*Channel),
+ subChannels: make(map[int64]*SubChannel),
+ sockets: make(map[int64]*Socket),
+ servers: make(map[int64]*Server),
+ }
+}
+
+func (c *channelMap) addServer(id int64, s *Server) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ s.cm = c
+ c.servers[id] = s
+}
+
+func (c *channelMap) addChannel(id int64, cn *Channel, isTopChannel bool, pid int64) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ cn.trace.cm = c
+ c.channels[id] = cn
+ if isTopChannel {
+ c.topLevelChannels[id] = struct{}{}
+ } else if p := c.channels[pid]; p != nil {
+ p.addChild(id, cn)
+ } else {
+ logger.Infof("channel %d references invalid parent ID %d", id, pid)
+ }
+}
+
+func (c *channelMap) addSubChannel(id int64, sc *SubChannel, pid int64) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ sc.trace.cm = c
+ c.subChannels[id] = sc
+ if p := c.channels[pid]; p != nil {
+ p.addChild(id, sc)
+ } else {
+ logger.Infof("subchannel %d references invalid parent ID %d", id, pid)
+ }
+}
+
+func (c *channelMap) addSocket(s *Socket) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ s.cm = c
+ c.sockets[s.ID] = s
+ if s.Parent == nil {
+ logger.Infof("normal socket %d has no parent", s.ID)
+ }
+ s.Parent.(entry).addChild(s.ID, s)
+}
+
+// removeEntry triggers the removal of an entry, which may not indeed delete the
+// entry, if it has to wait on the deletion of its children and until no other
+// entity's channel trace references it. It may lead to a chain of entry
+// deletion. For example, deleting the last socket of a gracefully shutting down
+// server will lead to the server being also deleted.
+func (c *channelMap) removeEntry(id int64) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.findEntry(id).triggerDelete()
+}
+
+// tracedChannel represents tracing operations which are present on both
+// channels and subChannels.
+type tracedChannel interface {
+ getChannelTrace() *ChannelTrace
+ incrTraceRefCount()
+ decrTraceRefCount()
+ getRefName() string
+}
+
+// c.mu must be held by the caller
+func (c *channelMap) decrTraceRefCount(id int64) {
+ e := c.findEntry(id)
+ if v, ok := e.(tracedChannel); ok {
+ v.decrTraceRefCount()
+ e.deleteSelfIfReady()
+ }
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) findEntry(id int64) entry {
+ if v, ok := c.channels[id]; ok {
+ return v
+ }
+ if v, ok := c.subChannels[id]; ok {
+ return v
+ }
+ if v, ok := c.servers[id]; ok {
+ return v
+ }
+ if v, ok := c.sockets[id]; ok {
+ return v
+ }
+ return &dummyEntry{idNotFound: id}
+}
+
+// c.mu must be held by the caller
+//
+// deleteEntry deletes an entry from the channelMap. Before calling this method,
+// the caller must check that this entry is ready to be deleted, i.e. removeEntry() has
+// been called on it, and no children still exist.
+func (c *channelMap) deleteEntry(id int64) entry {
+ if v, ok := c.sockets[id]; ok {
+ delete(c.sockets, id)
+ return v
+ }
+ if v, ok := c.subChannels[id]; ok {
+ delete(c.subChannels, id)
+ return v
+ }
+ if v, ok := c.channels[id]; ok {
+ delete(c.channels, id)
+ delete(c.topLevelChannels, id)
+ return v
+ }
+ if v, ok := c.servers[id]; ok {
+ delete(c.servers, id)
+ return v
+ }
+ return &dummyEntry{idNotFound: id}
+}
+
+func (c *channelMap) traceEvent(id int64, desc *TraceEvent) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ child := c.findEntry(id)
+ childTC, ok := child.(tracedChannel)
+ if !ok {
+ return
+ }
+ childTC.getChannelTrace().append(&traceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
+ if desc.Parent != nil {
+ parent := c.findEntry(child.getParentID())
+ var chanType RefChannelType
+ switch child.(type) {
+ case *Channel:
+ chanType = RefChannel
+ case *SubChannel:
+ chanType = RefSubChannel
+ }
+ if parentTC, ok := parent.(tracedChannel); ok {
+ parentTC.getChannelTrace().append(&traceEvent{
+ Desc: desc.Parent.Desc,
+ Severity: desc.Parent.Severity,
+ Timestamp: time.Now(),
+ RefID: id,
+ RefName: childTC.getRefName(),
+ RefType: chanType,
+ })
+ childTC.incrTraceRefCount()
+ }
+ }
+}
+
+type int64Slice []int64
+
+func (s int64Slice) Len() int { return len(s) }
+func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
+
+func copyMap(m map[int64]string) map[int64]string {
+ n := make(map[int64]string)
+ for k, v := range m {
+ n[k] = v
+ }
+ return n
+}
+
+func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) {
+ if maxResults <= 0 {
+ maxResults = EntriesPerPage
+ }
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ l := int64(len(c.topLevelChannels))
+ ids := make([]int64, 0, l)
+
+ for k := range c.topLevelChannels {
+ ids = append(ids, k)
+ }
+ sort.Sort(int64Slice(ids))
+ idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+ end := true
+ var t []*Channel
+ for _, v := range ids[idx:] {
+ if len(t) == maxResults {
+ end = false
+ break
+ }
+ if cn, ok := c.channels[v]; ok {
+ t = append(t, cn)
+ }
+ }
+ return t, end
+}
+
+func (c *channelMap) getServers(id int64, maxResults int) ([]*Server, bool) {
+ if maxResults <= 0 {
+ maxResults = EntriesPerPage
+ }
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ ids := make([]int64, 0, len(c.servers))
+ for k := range c.servers {
+ ids = append(ids, k)
+ }
+ sort.Sort(int64Slice(ids))
+ idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+ end := true
+ var s []*Server
+ for _, v := range ids[idx:] {
+ if len(s) == maxResults {
+ end = false
+ break
+ }
+ if svr, ok := c.servers[v]; ok {
+ s = append(s, svr)
+ }
+ }
+ return s, end
+}
+
+func (c *channelMap) getServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) {
+ if maxResults <= 0 {
+ maxResults = EntriesPerPage
+ }
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ svr, ok := c.servers[id]
+ if !ok {
+ // server with id doesn't exist.
+ return nil, true
+ }
+ svrskts := svr.sockets
+ ids := make([]int64, 0, len(svrskts))
+ sks := make([]*Socket, 0, min(len(svrskts), maxResults))
+ for k := range svrskts {
+ ids = append(ids, k)
+ }
+ sort.Sort(int64Slice(ids))
+ idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
+ end := true
+ for _, v := range ids[idx:] {
+ if len(sks) == maxResults {
+ end = false
+ break
+ }
+ if ns, ok := c.sockets[v]; ok {
+ sks = append(sks, ns)
+ }
+ }
+ return sks, end
+}
+
+func (c *channelMap) getChannel(id int64) *Channel {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.channels[id]
+}
+
+func (c *channelMap) getSubChannel(id int64) *SubChannel {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.subChannels[id]
+}
+
+func (c *channelMap) getSocket(id int64) *Socket {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.sockets[id]
+}
+
+func (c *channelMap) getServer(id int64) *Server {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.servers[id]
+}
+
+type dummyEntry struct {
+ // dummyEntry is a fake entry to handle entry not found case.
+ idNotFound int64
+ Entity
+}
+
+func (d *dummyEntry) String() string {
+ return fmt.Sprintf("non-existent entity #%d", d.idNotFound)
+}
+
+func (d *dummyEntry) ID() int64 { return d.idNotFound }
+
+func (d *dummyEntry) addChild(id int64, e entry) {
+ // Note: It is possible for a normal program to reach here under a race
+ // condition. For example, there could be a race between ClientConn.Close()
+ // info being propagated to the addrConn and the http2Client. ClientConn.Close()
+ // cancels the context and causes the http2Client to error out. The error info
+ // is then caught by the transport monitor before addrConn.tearDown() is called
+ // inside ClientConn.Close(). Therefore, the addrConn will create a new
+ // transport. And when registering the new transport in channelz, its parent
+ // addrConn could have already been torn down and deleted from channelz
+ // tracking, and thus reach the code here.
+ logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+}
+
+func (d *dummyEntry) deleteChild(id int64) {
+ // It is possible for a normal program to reach here under race condition.
+ // Refer to the example described in addChild().
+ logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+}
+
+func (d *dummyEntry) triggerDelete() {
+ logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+}
+
+func (*dummyEntry) deleteSelfIfReady() {
+ // code should not reach here. deleteSelfIfReady is always called on an existing entry.
+}
+
+func (*dummyEntry) getParentID() int64 {
+ return 0
+}
+
+// Entity is implemented by all channelz types.
+type Entity interface {
+ isEntity()
+ fmt.Stringer
+ id() int64
+}
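
The rewritten channelMap above pairs each paginated getter with an end boolean instead of the old count-based bookkeeping. Below is a minimal sketch of how a caller might page through every top-level channel via GetTopChannels (defined in funcs.go further down); the channelz package is internal to gRPC, so this only compiles from within the google.golang.org/grpc module, and dumpTopChannels and the channel target are illustrative.

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/channelz"
)

// dumpTopChannels pages through all registered top-level channels, resuming
// each page just past the last id seen, until the listing is complete.
func dumpTopChannels() {
	startID := int64(0)
	for {
		// maxResults == 0 falls back to EntriesPerPage (50); results are
		// sorted by ascending id and end reports whether the listing is done.
		channels, end := channelz.GetTopChannels(startID, 0)
		for _, ch := range channels {
			fmt.Printf("channel #%d: %s\n", ch.ID, ch.RefName)
		}
		if end || len(channels) == 0 {
			break
		}
		startID = channels[len(channels)-1].ID + 1
	}
}

func main() {
	channelz.TurnOn()
	channelz.RegisterChannel(nil, "dns:///example.com:443") // illustrative target
	dumpTopChannels()
}
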
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
index 5395e7752..078bb8123 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -16,25 +16,16 @@
*
*/
-// Package channelz defines APIs for enabling channelz service, entry
+// Package channelz defines internal APIs for enabling channelz service, entry
// registration/deletion, and accessing channelz data. It also defines channelz
// metric struct formats.
-//
-// All APIs in this package are experimental.
package channelz
import (
- "errors"
- "sort"
- "sync"
"sync/atomic"
"time"
- "google.golang.org/grpc/grpclog"
-)
-
-const (
- defaultMaxTraceEntry int32 = 30
+ "google.golang.org/grpc/internal"
)
var (
@@ -42,19 +33,20 @@ var (
// outside this package except by tests.
IDGen IDGenerator
- db dbWrapper
- // EntryPerPage defines the number of channelz entries to be shown on a web page.
- EntryPerPage = int64(50)
- curState int32
- maxTraceEntry = defaultMaxTraceEntry
+ db = newChannelMap()
+ // EntriesPerPage defines the number of channelz entries to be shown on a web page.
+ EntriesPerPage = 50
+ curState int32
)
// TurnOn turns on channelz data collection.
func TurnOn() {
- if !IsOn() {
- db.set(newChannelMap())
- IDGen.Reset()
- atomic.StoreInt32(&curState, 1)
+ atomic.StoreInt32(&curState, 1)
+}
+
+func init() {
+ internal.ChannelzTurnOffForTesting = func() {
+ atomic.StoreInt32(&curState, 0)
}
}
@@ -63,49 +55,15 @@ func IsOn() bool {
return atomic.LoadInt32(&curState) == 1
}
-// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel).
-// Setting it to 0 will disable channel tracing.
-func SetMaxTraceEntry(i int32) {
- atomic.StoreInt32(&maxTraceEntry, i)
-}
-
-// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default.
-func ResetMaxTraceEntryToDefault() {
- atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
-}
-
-func getMaxTraceEntry() int {
- i := atomic.LoadInt32(&maxTraceEntry)
- return int(i)
-}
-
-// dbWarpper wraps around a reference to internal channelz data storage, and
-// provide synchronized functionality to set and get the reference.
-type dbWrapper struct {
- mu sync.RWMutex
- DB *channelMap
-}
-
-func (d *dbWrapper) set(db *channelMap) {
- d.mu.Lock()
- d.DB = db
- d.mu.Unlock()
-}
-
-func (d *dbWrapper) get() *channelMap {
- d.mu.RLock()
- defer d.mu.RUnlock()
- return d.DB
-}
-
// GetTopChannels returns a slice of top channel's ChannelMetric, along with a
// boolean indicating whether there's more top channels to be queried for.
//
-// The arg id specifies that only top channel with id at or above it will be included
-// in the result. The returned slice is up to a length of the arg maxResults or
-// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
-func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
- return db.get().GetTopChannels(id, maxResults)
+// The arg id specifies that only top channel with id at or above it will be
+// included in the result. The returned slice is up to a length of the arg
+// maxResults or EntriesPerPage if maxResults is zero, and is sorted in ascending
+// id order.
+func GetTopChannels(id int64, maxResults int) ([]*Channel, bool) {
+ return db.getTopChannels(id, maxResults)
}
// GetServers returns a slice of server's ServerMetric, along with a
@@ -113,73 +71,69 @@ func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
//
// The arg id specifies that only server with id at or above it will be included
// in the result. The returned slice is up to a length of the arg maxResults or
-// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
-func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
- return db.get().GetServers(id, maxResults)
+// EntriesPerPage if maxResults is zero, and is sorted in ascending id order.
+func GetServers(id int64, maxResults int) ([]*Server, bool) {
+ return db.getServers(id, maxResults)
}
// GetServerSockets returns a slice of server's (identified by id) normal socket's
-// SocketMetric, along with a boolean indicating whether there's more sockets to
+// SocketMetrics, along with a boolean indicating whether there's more sockets to
// be queried for.
//
// The arg startID specifies that only sockets with id at or above it will be
// included in the result. The returned slice is up to a length of the arg maxResults
-// or EntryPerPage if maxResults is zero, and is sorted in ascending id order.
-func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
- return db.get().GetServerSockets(id, startID, maxResults)
+// or EntriesPerPage if maxResults is zero, and is sorted in ascending id order.
+func GetServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) {
+ return db.getServerSockets(id, startID, maxResults)
}
-// GetChannel returns the ChannelMetric for the channel (identified by id).
-func GetChannel(id int64) *ChannelMetric {
- return db.get().GetChannel(id)
+// GetChannel returns the Channel for the channel (identified by id).
+func GetChannel(id int64) *Channel {
+ return db.getChannel(id)
}
-// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
-func GetSubChannel(id int64) *SubChannelMetric {
- return db.get().GetSubChannel(id)
+// GetSubChannel returns the SubChannel for the subchannel (identified by id).
+func GetSubChannel(id int64) *SubChannel {
+ return db.getSubChannel(id)
}
-// GetSocket returns the SocketInternalMetric for the socket (identified by id).
-func GetSocket(id int64) *SocketMetric {
- return db.get().GetSocket(id)
+// GetSocket returns the Socket for the socket (identified by id).
+func GetSocket(id int64) *Socket {
+ return db.getSocket(id)
}
// GetServer returns the ServerMetric for the server (identified by id).
-func GetServer(id int64) *ServerMetric {
- return db.get().GetServer(id)
+func GetServer(id int64) *Server {
+ return db.getServer(id)
}
// RegisterChannel registers the given channel c in the channelz database with
-// ref as its reference name, and adds it to the child list of its parent
-// (identified by pid). pid == nil means no parent.
+// target as its target and reference name, and adds it to the child list of its
+// parent. parent == nil means no parent.
//
// Returns a unique channelz identifier assigned to this channel.
//
// If channelz is not turned ON, the channelz database is not mutated.
-func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
+func RegisterChannel(parent *Channel, target string) *Channel {
id := IDGen.genID()
- var parent int64
- isTopChannel := true
- if pid != nil {
- isTopChannel = false
- parent = pid.Int()
- }
if !IsOn() {
- return newIdentifer(RefChannel, id, pid)
+ return &Channel{ID: id}
}
- cn := &channel{
- refName: ref,
- c: c,
- subChans: make(map[int64]string),
+ isTopChannel := parent == nil
+
+ cn := &Channel{
+ ID: id,
+ RefName: target,
nestedChans: make(map[int64]string),
- id: id,
- pid: parent,
- trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+ subChans: make(map[int64]string),
+ Parent: parent,
+ trace: &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())},
}
- db.get().addChannel(id, cn, isTopChannel, parent)
- return newIdentifer(RefChannel, id, pid)
+ cn.ChannelMetrics.Target.Store(&target)
+ db.addChannel(id, cn, isTopChannel, cn.getParentID())
+ return cn
}
// RegisterSubChannel registers the given subChannel c in the channelz database
@@ -189,555 +143,67 @@ func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
// Returns a unique channelz identifier assigned to this subChannel.
//
// If channelz is not turned ON, the channelz database is not mutated.
-func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) {
- if pid == nil {
- return nil, errors.New("a SubChannel's parent id cannot be nil")
- }
+func RegisterSubChannel(parent *Channel, ref string) *SubChannel {
id := IDGen.genID()
- if !IsOn() {
- return newIdentifer(RefSubChannel, id, pid), nil
+ sc := &SubChannel{
+ ID: id,
+ RefName: ref,
+ parent: parent,
}
- sc := &subChannel{
- refName: ref,
- c: c,
- sockets: make(map[int64]string),
- id: id,
- pid: pid.Int(),
- trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+ if !IsOn() {
+ return sc
}
- db.get().addSubChannel(id, sc, pid.Int())
- return newIdentifer(RefSubChannel, id, pid), nil
+
+ sc.sockets = make(map[int64]string)
+ sc.trace = &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}
+ db.addSubChannel(id, sc, parent.ID)
+ return sc
}
// RegisterServer registers the given server s in channelz database. It returns
// the unique channelz tracking id assigned to this server.
//
// If channelz is not turned ON, the channelz database is not mutated.
-func RegisterServer(s Server, ref string) *Identifier {
+func RegisterServer(ref string) *Server {
id := IDGen.genID()
if !IsOn() {
- return newIdentifer(RefServer, id, nil)
+ return &Server{ID: id}
}
- svr := &server{
- refName: ref,
- s: s,
+ svr := &Server{
+ RefName: ref,
sockets: make(map[int64]string),
listenSockets: make(map[int64]string),
- id: id,
- }
- db.get().addServer(id, svr)
- return newIdentifer(RefServer, id, nil)
-}
-
-// RegisterListenSocket registers the given listen socket s in channelz database
-// with ref as its reference name, and add it to the child list of its parent
-// (identified by pid). It returns the unique channelz tracking id assigned to
-// this listen socket.
-//
-// If channelz is not turned ON, the channelz database is not mutated.
-func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
- if pid == nil {
- return nil, errors.New("a ListenSocket's parent id cannot be 0")
+ ID: id,
}
- id := IDGen.genID()
- if !IsOn() {
- return newIdentifer(RefListenSocket, id, pid), nil
- }
-
- ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()}
- db.get().addListenSocket(id, ls, pid.Int())
- return newIdentifer(RefListenSocket, id, pid), nil
+ db.addServer(id, svr)
+ return svr
}
-// RegisterNormalSocket registers the given normal socket s in channelz database
+// RegisterSocket registers the given normal socket s in channelz database
// with ref as its reference name, and adds it to the child list of its parent
-// (identified by pid). It returns the unique channelz tracking id assigned to
-// this normal socket.
+// (identified by skt.Parent, which must be set). It returns the unique channelz
+// tracking id assigned to this normal socket.
//
// If channelz is not turned ON, the channelz database is not mutated.
-func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
- if pid == nil {
- return nil, errors.New("a NormalSocket's parent id cannot be 0")
- }
- id := IDGen.genID()
- if !IsOn() {
- return newIdentifer(RefNormalSocket, id, pid), nil
+func RegisterSocket(skt *Socket) *Socket {
+ skt.ID = IDGen.genID()
+ if IsOn() {
+ db.addSocket(skt)
}
-
- ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()}
- db.get().addNormalSocket(id, ns, pid.Int())
- return newIdentifer(RefNormalSocket, id, pid), nil
+ return skt
}
// RemoveEntry removes an entry with unique channelz tracking id to be id from
// channelz database.
//
// If channelz is not turned ON, this function is a no-op.
-func RemoveEntry(id *Identifier) {
+func RemoveEntry(id int64) {
if !IsOn() {
return
}
- db.get().removeEntry(id.Int())
-}
-
-// TraceEventDesc is what the caller of AddTraceEvent should provide to describe
-// the event to be added to the channel trace.
-//
-// The Parent field is optional. It is used for an event that will be recorded
-// in the entity's parent trace.
-type TraceEventDesc struct {
- Desc string
- Severity Severity
- Parent *TraceEventDesc
-}
-
-// AddTraceEvent adds trace related to the entity with specified id, using the
-// provided TraceEventDesc.
-//
-// If channelz is not turned ON, this will simply log the event descriptions.
-func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) {
- // Log only the trace description associated with the bottom most entity.
- switch desc.Severity {
- case CtUnknown, CtInfo:
- l.InfoDepth(depth+1, withParens(id)+desc.Desc)
- case CtWarning:
- l.WarningDepth(depth+1, withParens(id)+desc.Desc)
- case CtError:
- l.ErrorDepth(depth+1, withParens(id)+desc.Desc)
- }
-
- if getMaxTraceEntry() == 0 {
- return
- }
- if IsOn() {
- db.get().traceEvent(id.Int(), desc)
- }
-}
-
-// channelMap is the storage data structure for channelz.
-// Methods of channelMap can be divided in two two categories with respect to locking.
-// 1. Methods acquire the global lock.
-// 2. Methods that can only be called when global lock is held.
-// A second type of method need always to be called inside a first type of method.
-type channelMap struct {
- mu sync.RWMutex
- topLevelChannels map[int64]struct{}
- servers map[int64]*server
- channels map[int64]*channel
- subChannels map[int64]*subChannel
- listenSockets map[int64]*listenSocket
- normalSockets map[int64]*normalSocket
-}
-
-func newChannelMap() *channelMap {
- return &channelMap{
- topLevelChannels: make(map[int64]struct{}),
- channels: make(map[int64]*channel),
- listenSockets: make(map[int64]*listenSocket),
- normalSockets: make(map[int64]*normalSocket),
- servers: make(map[int64]*server),
- subChannels: make(map[int64]*subChannel),
- }
-}
-
-func (c *channelMap) addServer(id int64, s *server) {
- c.mu.Lock()
- s.cm = c
- c.servers[id] = s
- c.mu.Unlock()
-}
-
-func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) {
- c.mu.Lock()
- cn.cm = c
- cn.trace.cm = c
- c.channels[id] = cn
- if isTopChannel {
- c.topLevelChannels[id] = struct{}{}
- } else {
- c.findEntry(pid).addChild(id, cn)
- }
- c.mu.Unlock()
-}
-
-func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) {
- c.mu.Lock()
- sc.cm = c
- sc.trace.cm = c
- c.subChannels[id] = sc
- c.findEntry(pid).addChild(id, sc)
- c.mu.Unlock()
-}
-
-func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) {
- c.mu.Lock()
- ls.cm = c
- c.listenSockets[id] = ls
- c.findEntry(pid).addChild(id, ls)
- c.mu.Unlock()
-}
-
-func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) {
- c.mu.Lock()
- ns.cm = c
- c.normalSockets[id] = ns
- c.findEntry(pid).addChild(id, ns)
- c.mu.Unlock()
-}
-
-// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to
-// wait on the deletion of its children and until no other entity's channel trace references it.
-// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully
-// shutting down server will lead to the server being also deleted.
-func (c *channelMap) removeEntry(id int64) {
- c.mu.Lock()
- c.findEntry(id).triggerDelete()
- c.mu.Unlock()
-}
-
-// c.mu must be held by the caller
-func (c *channelMap) decrTraceRefCount(id int64) {
- e := c.findEntry(id)
- if v, ok := e.(tracedChannel); ok {
- v.decrTraceRefCount()
- e.deleteSelfIfReady()
- }
-}
-
-// c.mu must be held by the caller.
-func (c *channelMap) findEntry(id int64) entry {
- var v entry
- var ok bool
- if v, ok = c.channels[id]; ok {
- return v
- }
- if v, ok = c.subChannels[id]; ok {
- return v
- }
- if v, ok = c.servers[id]; ok {
- return v
- }
- if v, ok = c.listenSockets[id]; ok {
- return v
- }
- if v, ok = c.normalSockets[id]; ok {
- return v
- }
- return &dummyEntry{idNotFound: id}
-}
-
-// c.mu must be held by the caller
-// deleteEntry simply deletes an entry from the channelMap. Before calling this
-// method, caller must check this entry is ready to be deleted, i.e removeEntry()
-// has been called on it, and no children still exist.
-// Conditionals are ordered by the expected frequency of deletion of each entity
-// type, in order to optimize performance.
-func (c *channelMap) deleteEntry(id int64) {
- var ok bool
- if _, ok = c.normalSockets[id]; ok {
- delete(c.normalSockets, id)
- return
- }
- if _, ok = c.subChannels[id]; ok {
- delete(c.subChannels, id)
- return
- }
- if _, ok = c.channels[id]; ok {
- delete(c.channels, id)
- delete(c.topLevelChannels, id)
- return
- }
- if _, ok = c.listenSockets[id]; ok {
- delete(c.listenSockets, id)
- return
- }
- if _, ok = c.servers[id]; ok {
- delete(c.servers, id)
- return
- }
-}
-
-func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) {
- c.mu.Lock()
- child := c.findEntry(id)
- childTC, ok := child.(tracedChannel)
- if !ok {
- c.mu.Unlock()
- return
- }
- childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
- if desc.Parent != nil {
- parent := c.findEntry(child.getParentID())
- var chanType RefChannelType
- switch child.(type) {
- case *channel:
- chanType = RefChannel
- case *subChannel:
- chanType = RefSubChannel
- }
- if parentTC, ok := parent.(tracedChannel); ok {
- parentTC.getChannelTrace().append(&TraceEvent{
- Desc: desc.Parent.Desc,
- Severity: desc.Parent.Severity,
- Timestamp: time.Now(),
- RefID: id,
- RefName: childTC.getRefName(),
- RefType: chanType,
- })
- childTC.incrTraceRefCount()
- }
- }
- c.mu.Unlock()
-}
-
-type int64Slice []int64
-
-func (s int64Slice) Len() int { return len(s) }
-func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
-
-func copyMap(m map[int64]string) map[int64]string {
- n := make(map[int64]string)
- for k, v := range m {
- n[k] = v
- }
- return n
-}
-
-func min(a, b int64) int64 {
- if a < b {
- return a
- }
- return b
-}
-
-func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
- if maxResults <= 0 {
- maxResults = EntryPerPage
- }
- c.mu.RLock()
- l := int64(len(c.topLevelChannels))
- ids := make([]int64, 0, l)
- cns := make([]*channel, 0, min(l, maxResults))
-
- for k := range c.topLevelChannels {
- ids = append(ids, k)
- }
- sort.Sort(int64Slice(ids))
- idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
- count := int64(0)
- var end bool
- var t []*ChannelMetric
- for i, v := range ids[idx:] {
- if count == maxResults {
- break
- }
- if cn, ok := c.channels[v]; ok {
- cns = append(cns, cn)
- t = append(t, &ChannelMetric{
- NestedChans: copyMap(cn.nestedChans),
- SubChans: copyMap(cn.subChans),
- })
- count++
- }
- if i == len(ids[idx:])-1 {
- end = true
- break
- }
- }
- c.mu.RUnlock()
- if count == 0 {
- end = true
- }
-
- for i, cn := range cns {
- t[i].ChannelData = cn.c.ChannelzMetric()
- t[i].ID = cn.id
- t[i].RefName = cn.refName
- t[i].Trace = cn.trace.dumpData()
- }
- return t, end
-}
-
-func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) {
- if maxResults <= 0 {
- maxResults = EntryPerPage
- }
- c.mu.RLock()
- l := int64(len(c.servers))
- ids := make([]int64, 0, l)
- ss := make([]*server, 0, min(l, maxResults))
- for k := range c.servers {
- ids = append(ids, k)
- }
- sort.Sort(int64Slice(ids))
- idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
- count := int64(0)
- var end bool
- var s []*ServerMetric
- for i, v := range ids[idx:] {
- if count == maxResults {
- break
- }
- if svr, ok := c.servers[v]; ok {
- ss = append(ss, svr)
- s = append(s, &ServerMetric{
- ListenSockets: copyMap(svr.listenSockets),
- })
- count++
- }
- if i == len(ids[idx:])-1 {
- end = true
- break
- }
- }
- c.mu.RUnlock()
- if count == 0 {
- end = true
- }
-
- for i, svr := range ss {
- s[i].ServerData = svr.s.ChannelzMetric()
- s[i].ID = svr.id
- s[i].RefName = svr.refName
- }
- return s, end
-}
-
-func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
- if maxResults <= 0 {
- maxResults = EntryPerPage
- }
- var svr *server
- var ok bool
- c.mu.RLock()
- if svr, ok = c.servers[id]; !ok {
- // server with id doesn't exist.
- c.mu.RUnlock()
- return nil, true
- }
- svrskts := svr.sockets
- l := int64(len(svrskts))
- ids := make([]int64, 0, l)
- sks := make([]*normalSocket, 0, min(l, maxResults))
- for k := range svrskts {
- ids = append(ids, k)
- }
- sort.Sort(int64Slice(ids))
- idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
- count := int64(0)
- var end bool
- for i, v := range ids[idx:] {
- if count == maxResults {
- break
- }
- if ns, ok := c.normalSockets[v]; ok {
- sks = append(sks, ns)
- count++
- }
- if i == len(ids[idx:])-1 {
- end = true
- break
- }
- }
- c.mu.RUnlock()
- if count == 0 {
- end = true
- }
- s := make([]*SocketMetric, 0, len(sks))
- for _, ns := range sks {
- sm := &SocketMetric{}
- sm.SocketData = ns.s.ChannelzMetric()
- sm.ID = ns.id
- sm.RefName = ns.refName
- s = append(s, sm)
- }
- return s, end
-}
-
-func (c *channelMap) GetChannel(id int64) *ChannelMetric {
- cm := &ChannelMetric{}
- var cn *channel
- var ok bool
- c.mu.RLock()
- if cn, ok = c.channels[id]; !ok {
- // channel with id doesn't exist.
- c.mu.RUnlock()
- return nil
- }
- cm.NestedChans = copyMap(cn.nestedChans)
- cm.SubChans = copyMap(cn.subChans)
- // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when
- // holding the lock to prevent potential data race.
- chanCopy := cn.c
- c.mu.RUnlock()
- cm.ChannelData = chanCopy.ChannelzMetric()
- cm.ID = cn.id
- cm.RefName = cn.refName
- cm.Trace = cn.trace.dumpData()
- return cm
-}
-
-func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
- cm := &SubChannelMetric{}
- var sc *subChannel
- var ok bool
- c.mu.RLock()
- if sc, ok = c.subChannels[id]; !ok {
- // subchannel with id doesn't exist.
- c.mu.RUnlock()
- return nil
- }
- cm.Sockets = copyMap(sc.sockets)
- // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when
- // holding the lock to prevent potential data race.
- chanCopy := sc.c
- c.mu.RUnlock()
- cm.ChannelData = chanCopy.ChannelzMetric()
- cm.ID = sc.id
- cm.RefName = sc.refName
- cm.Trace = sc.trace.dumpData()
- return cm
-}
-
-func (c *channelMap) GetSocket(id int64) *SocketMetric {
- sm := &SocketMetric{}
- c.mu.RLock()
- if ls, ok := c.listenSockets[id]; ok {
- c.mu.RUnlock()
- sm.SocketData = ls.s.ChannelzMetric()
- sm.ID = ls.id
- sm.RefName = ls.refName
- return sm
- }
- if ns, ok := c.normalSockets[id]; ok {
- c.mu.RUnlock()
- sm.SocketData = ns.s.ChannelzMetric()
- sm.ID = ns.id
- sm.RefName = ns.refName
- return sm
- }
- c.mu.RUnlock()
- return nil
-}
-
-func (c *channelMap) GetServer(id int64) *ServerMetric {
- sm := &ServerMetric{}
- var svr *server
- var ok bool
- c.mu.RLock()
- if svr, ok = c.servers[id]; !ok {
- c.mu.RUnlock()
- return nil
- }
- sm.ListenSockets = copyMap(svr.listenSockets)
- c.mu.RUnlock()
- sm.ID = svr.id
- sm.RefName = svr.refName
- sm.ServerData = svr.s.ChannelzMetric()
- return sm
+ db.removeEntry(id)
}
// IDGenerator is an incrementing atomic that tracks IDs for channelz entities.
@@ -754,3 +220,11 @@ func (i *IDGenerator) Reset() {
func (i *IDGenerator) genID() int64 {
return atomic.AddInt64(&i.id, 1)
}
+
+// Identifier is an opaque channelz identifier used to expose channelz symbols
+// outside of grpc. Currently only implemented by Channel since no other
+// types require exposure outside grpc.
+type Identifier interface {
+ Entity
+ channelzIdentifier()
+}
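
funcs.go now hands back the channelz entities themselves (*Channel, *SubChannel, *Server, *Socket) instead of *Identifier handles and metric snapshots. A rough sketch of the new registration and removal flow, assuming code running inside the grpc module since the package is internal; the target and reference names are made up for illustration.

package main

import (
	"google.golang.org/grpc/internal/channelz"
)

func main() {
	channelz.TurnOn()

	// A nil parent marks the channel as a top-level channel.
	top := channelz.RegisterChannel(nil, "dns:///example.com:443")

	// Subchannels are attached to their parent directly; there is no error
	// return and no *Identifier indirection anymore.
	sub := channelz.RegisterSubChannel(top, "backend-1")

	// Servers are registered by reference name only.
	srv := channelz.RegisterServer("demo-server")

	// Entries are now removed by their int64 id.
	channelz.RemoveEntry(sub.ID)
	channelz.RemoveEntry(top.ID)
	channelz.RemoveEntry(srv.ID)
}
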
diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go
deleted file mode 100644
index c9a27acd3..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/id.go
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- *
- * Copyright 2022 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import "fmt"
-
-// Identifier is an opaque identifier which uniquely identifies an entity in the
-// channelz database.
-type Identifier struct {
- typ RefChannelType
- id int64
- str string
- pid *Identifier
-}
-
-// Type returns the entity type corresponding to id.
-func (id *Identifier) Type() RefChannelType {
- return id.typ
-}
-
-// Int returns the integer identifier corresponding to id.
-func (id *Identifier) Int() int64 {
- return id.id
-}
-
-// String returns a string representation of the entity corresponding to id.
-//
-// This includes some information about the parent as well. Examples:
-// Top-level channel: [Channel #channel-number]
-// Nested channel: [Channel #parent-channel-number Channel #channel-number]
-// Sub channel: [Channel #parent-channel SubChannel #subchannel-number]
-func (id *Identifier) String() string {
- return id.str
-}
-
-// Equal returns true if other is the same as id.
-func (id *Identifier) Equal(other *Identifier) bool {
- if (id != nil) != (other != nil) {
- return false
- }
- if id == nil && other == nil {
- return true
- }
- return id.typ == other.typ && id.id == other.id && id.pid == other.pid
-}
-
-// NewIdentifierForTesting returns a new opaque identifier to be used only for
-// testing purposes.
-func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier {
- return newIdentifer(typ, id, pid)
-}
-
-func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier {
- str := fmt.Sprintf("%s #%d", typ, id)
- if pid != nil {
- str = fmt.Sprintf("%s %s", pid, str)
- }
- return &Identifier{typ: typ, id: id, str: str, pid: pid}
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go
index f89e6f77b..ee4d72125 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/logging.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go
@@ -26,53 +26,49 @@ import (
var logger = grpclog.Component("channelz")
-func withParens(id *Identifier) string {
- return "[" + id.String() + "] "
-}
-
// Info logs and adds a trace event if channelz is on.
-func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
- AddTraceEvent(l, id, 1, &TraceEventDesc{
+func Info(l grpclog.DepthLoggerV2, e Entity, args ...any) {
+ AddTraceEvent(l, e, 1, &TraceEvent{
Desc: fmt.Sprint(args...),
Severity: CtInfo,
})
}
// Infof logs and adds a trace event if channelz is on.
-func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
- AddTraceEvent(l, id, 1, &TraceEventDesc{
+func Infof(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
+ AddTraceEvent(l, e, 1, &TraceEvent{
Desc: fmt.Sprintf(format, args...),
Severity: CtInfo,
})
}
// Warning logs and adds a trace event if channelz is on.
-func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
- AddTraceEvent(l, id, 1, &TraceEventDesc{
+func Warning(l grpclog.DepthLoggerV2, e Entity, args ...any) {
+ AddTraceEvent(l, e, 1, &TraceEvent{
Desc: fmt.Sprint(args...),
Severity: CtWarning,
})
}
// Warningf logs and adds a trace event if channelz is on.
-func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
- AddTraceEvent(l, id, 1, &TraceEventDesc{
+func Warningf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
+ AddTraceEvent(l, e, 1, &TraceEvent{
Desc: fmt.Sprintf(format, args...),
Severity: CtWarning,
})
}
// Error logs and adds a trace event if channelz is on.
-func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
- AddTraceEvent(l, id, 1, &TraceEventDesc{
+func Error(l grpclog.DepthLoggerV2, e Entity, args ...any) {
+ AddTraceEvent(l, e, 1, &TraceEvent{
Desc: fmt.Sprint(args...),
Severity: CtError,
})
}
// Errorf logs and adds a trace event if channelz is on.
-func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
- AddTraceEvent(l, id, 1, &TraceEventDesc{
+func Errorf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
+ AddTraceEvent(l, e, 1, &TraceEvent{
Desc: fmt.Sprintf(format, args...),
Severity: CtError,
})
diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go
new file mode 100644
index 000000000..b5a824992
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/server.go
@@ -0,0 +1,121 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+ "fmt"
+ "sync/atomic"
+)
+
+// Server is the channelz representation of a server.
+type Server struct {
+ Entity
+ ID int64
+ RefName string
+
+ ServerMetrics ServerMetrics
+
+ closeCalled bool
+ sockets map[int64]string
+ listenSockets map[int64]string
+ cm *channelMap
+}
+
+// ServerMetrics defines a struct containing metrics for servers.
+type ServerMetrics struct {
+ // The number of incoming calls started on the server.
+ CallsStarted atomic.Int64
+ // The number of incoming calls that have completed with an OK status.
+ CallsSucceeded atomic.Int64
+ // The number of incoming calls that have completed with a non-OK status.
+ CallsFailed atomic.Int64
+ // The last time a call was started on the server.
+ LastCallStartedTimestamp atomic.Int64
+}
+
+// NewServerMetricsForTesting returns an initialized ServerMetrics.
+func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *ServerMetrics {
+ sm := &ServerMetrics{}
+ sm.CallsStarted.Store(started)
+ sm.CallsSucceeded.Store(succeeded)
+ sm.CallsFailed.Store(failed)
+ sm.LastCallStartedTimestamp.Store(timestamp)
+ return sm
+}
+
+// CopyFrom copies the metrics data from the provided ServerMetrics
+// instance into the current instance.
+func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) {
+ sm.CallsStarted.Store(o.CallsStarted.Load())
+ sm.CallsSucceeded.Store(o.CallsSucceeded.Load())
+ sm.CallsFailed.Store(o.CallsFailed.Load())
+ sm.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
+}
+
+// ListenSockets returns the listening sockets for s.
+func (s *Server) ListenSockets() map[int64]string {
+ db.mu.RLock()
+ defer db.mu.RUnlock()
+ return copyMap(s.listenSockets)
+}
+
+// String returns a printable description of s.
+func (s *Server) String() string {
+ return fmt.Sprintf("Server #%d", s.ID)
+}
+
+func (s *Server) id() int64 {
+ return s.ID
+}
+
+func (s *Server) addChild(id int64, e entry) {
+ switch v := e.(type) {
+ case *Socket:
+ switch v.SocketType {
+ case SocketTypeNormal:
+ s.sockets[id] = v.RefName
+ case SocketTypeListen:
+ s.listenSockets[id] = v.RefName
+ }
+ default:
+ logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
+ }
+}
+
+func (s *Server) deleteChild(id int64) {
+ delete(s.sockets, id)
+ delete(s.listenSockets, id)
+ s.deleteSelfIfReady()
+}
+
+func (s *Server) triggerDelete() {
+ s.closeCalled = true
+ s.deleteSelfIfReady()
+}
+
+func (s *Server) deleteSelfIfReady() {
+ if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
+ return
+ }
+ s.cm.deleteEntry(s.ID)
+}
+
+func (s *Server) getParentID() int64 {
+ return 0
+}
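
ServerMetrics replaces the old ChannelzMetric() pull model with atomic counters that are updated in place. A small sketch, under the same internal-package assumption, of bumping the counters and taking a point-in-time snapshot with CopyFrom; the counter values and reference name are illustrative.

package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/channelz"
)

func main() {
	channelz.TurnOn()
	srv := channelz.RegisterServer("metrics-demo")

	// Record one call starting and completing successfully.
	srv.ServerMetrics.CallsStarted.Add(1)
	srv.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
	srv.ServerMetrics.CallsSucceeded.Add(1)

	// CopyFrom stores each counter into the destination, giving a snapshot
	// that can be read without touching the live metrics again.
	var snapshot channelz.ServerMetrics
	snapshot.CopyFrom(&srv.ServerMetrics)
	fmt.Printf("started=%d succeeded=%d failed=%d\n",
		snapshot.CallsStarted.Load(),
		snapshot.CallsSucceeded.Load(),
		snapshot.CallsFailed.Load())

	channelz.RemoveEntry(srv.ID)
}
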
diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go
new file mode 100644
index 000000000..90103847c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go
@@ -0,0 +1,137 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+ "fmt"
+ "net"
+ "sync/atomic"
+
+ "google.golang.org/grpc/credentials"
+)
+
+// SocketMetrics defines the struct that the implementor of Socket interface
+// should return from ChannelzMetric().
+type SocketMetrics struct {
+ // The number of streams that have been started.
+ StreamsStarted atomic.Int64
+ // The number of streams that have ended successfully:
+ // On client side, receiving frame with eos bit set.
+ // On server side, sending frame with eos bit set.
+ StreamsSucceeded atomic.Int64
+ // The number of streams that have ended unsuccessfully:
+ // On client side, termination without receiving frame with eos bit set.
+ // On server side, termination without sending frame with eos bit set.
+ StreamsFailed atomic.Int64
+ // The number of messages successfully sent on this socket.
+ MessagesSent atomic.Int64
+ MessagesReceived atomic.Int64
+ // The number of keep alives sent. This is typically implemented with HTTP/2
+ // ping messages.
+ KeepAlivesSent atomic.Int64
+ // The last time a stream was created by this endpoint. Usually unset for
+ // servers.
+ LastLocalStreamCreatedTimestamp atomic.Int64
+ // The last time a stream was created by the remote endpoint. Usually unset
+ // for clients.
+ LastRemoteStreamCreatedTimestamp atomic.Int64
+ // The last time a message was sent by this endpoint.
+ LastMessageSentTimestamp atomic.Int64
+ // The last time a message was received by this endpoint.
+ LastMessageReceivedTimestamp atomic.Int64
+}
+
+// EphemeralSocketMetrics are metrics that change rapidly and are tracked
+// outside of channelz.
+type EphemeralSocketMetrics struct {
+ // The amount of window, granted to the local endpoint by the remote endpoint.
+ // This may be slightly out of date due to network latency. This does NOT
+ // include stream level or TCP level flow control info.
+ LocalFlowControlWindow int64
+ // The amount of window, granted to the remote endpoint by the local endpoint.
+ // This may be slightly out of date due to network latency. This does NOT
+ // include stream level or TCP level flow control info.
+ RemoteFlowControlWindow int64
+}
+
+// SocketType represents the type of socket.
+type SocketType string
+
+// SocketType can be one of these.
+const (
+ SocketTypeNormal = "NormalSocket"
+ SocketTypeListen = "ListenSocket"
+)
+
+// Socket represents a socket within channelz which includes socket
+// metrics and data related to socket activity and provides methods
+// for managing and interacting with sockets.
+type Socket struct {
+ Entity
+ SocketType SocketType
+ ID int64
+ Parent Entity
+ cm *channelMap
+ SocketMetrics SocketMetrics
+ EphemeralMetrics func() *EphemeralSocketMetrics
+
+ RefName string
+ // The locally bound address. Immutable.
+ LocalAddr net.Addr
+ // The remote bound address. May be absent. Immutable.
+ RemoteAddr net.Addr
+ // Optional, represents the name of the remote endpoint, if different than
+ // the original target name. Immutable.
+ RemoteName string
+ // Immutable.
+ SocketOptions *SocketOptionData
+ // Immutable.
+ Security credentials.ChannelzSecurityValue
+}
+
+// String returns a string representation of the Socket, including its parent
+// entity, socket type, and ID.
+func (ls *Socket) String() string {
+ return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID)
+}
+
+func (ls *Socket) id() int64 {
+ return ls.ID
+}
+
+func (ls *Socket) addChild(id int64, e entry) {
+ logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
+}
+
+func (ls *Socket) deleteChild(id int64) {
+ logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
+}
+
+func (ls *Socket) triggerDelete() {
+ ls.cm.deleteEntry(ls.ID)
+ ls.Parent.(entry).deleteChild(ls.ID)
+}
+
+func (ls *Socket) deleteSelfIfReady() {
+ logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
+}
+
+func (ls *Socket) getParentID() int64 {
+ return ls.Parent.id()
+}
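
Sockets are now plain structs registered via RegisterSocket, with the parent supplied through the Parent field rather than a pid argument. A hedged sketch of registering a listen socket under a server; registerListenSocket is an illustrative helper, and the listener address and reference names are arbitrary.

package main

import (
	"net"

	"google.golang.org/grpc/internal/channelz"
)

// registerListenSocket wires a net.Listener into channelz as a ListenSocket
// child of the given server.
func registerListenSocket(srv *channelz.Server, lis net.Listener) *channelz.Socket {
	return channelz.RegisterSocket(&channelz.Socket{
		SocketType: channelz.SocketTypeListen,
		Parent:     srv,
		RefName:    "listener",
		LocalAddr:  lis.Addr(),
	})
}

func main() {
	channelz.TurnOn()
	srv := channelz.RegisterServer("socket-demo")

	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer lis.Close()

	skt := registerListenSocket(srv, lis)
	// Removal is by id; the socket is unlinked from its parent's child list.
	channelz.RemoveEntry(skt.ID)
	channelz.RemoveEntry(srv.ID)
}
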
diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
new file mode 100644
index 000000000..b20802e6e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
@@ -0,0 +1,153 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+ "fmt"
+ "sync/atomic"
+)
+
+// SubChannel is the channelz representation of a subchannel.
+type SubChannel struct {
+ Entity
+ // ID is the channelz id of this subchannel.
+ ID int64
+ // RefName is the human readable reference string of this subchannel.
+ RefName string
+ closeCalled bool
+ sockets map[int64]string
+ parent *Channel
+ trace *ChannelTrace
+ traceRefCount int32
+
+ ChannelMetrics ChannelMetrics
+}
+
+func (sc *SubChannel) String() string {
+ return fmt.Sprintf("%s SubChannel #%d", sc.parent, sc.ID)
+}
+
+func (sc *SubChannel) id() int64 {
+ return sc.ID
+}
+
+// Sockets returns a copy of the sockets map associated with the SubChannel.
+func (sc *SubChannel) Sockets() map[int64]string {
+ db.mu.RLock()
+ defer db.mu.RUnlock()
+ return copyMap(sc.sockets)
+}
+
+// Trace returns a copy of the ChannelTrace associated with the SubChannel.
+func (sc *SubChannel) Trace() *ChannelTrace {
+ db.mu.RLock()
+ defer db.mu.RUnlock()
+ return sc.trace.copy()
+}
+
+func (sc *SubChannel) addChild(id int64, e entry) {
+ if v, ok := e.(*Socket); ok && v.SocketType == SocketTypeNormal {
+ sc.sockets[id] = v.RefName
+ } else {
+ logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
+ }
+}
+
+func (sc *SubChannel) deleteChild(id int64) {
+ delete(sc.sockets, id)
+ sc.deleteSelfIfReady()
+}
+
+func (sc *SubChannel) triggerDelete() {
+ sc.closeCalled = true
+ sc.deleteSelfIfReady()
+}
+
+func (sc *SubChannel) getParentID() int64 {
+ return sc.parent.ID
+}
+
+// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
+// means deleting the subchannel reference from its parent's child list.
+//
+// In order for a subchannel to be deleted from the tree, it must meet the criteria that removal of
+// the corresponding gRPC object has been invoked and the subchannel does not have any children left.
+//
+// The returned boolean value indicates whether the subchannel has been successfully deleted from the tree.
+func (sc *SubChannel) deleteSelfFromTree() (deleted bool) {
+ if !sc.closeCalled || len(sc.sockets) != 0 {
+ return false
+ }
+ sc.parent.deleteChild(sc.ID)
+ return true
+}
+
+// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
+// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
+// the subchannel, and its memory will be garbage collected.
+//
+// The trace reference count of the subchannel must be 0 in order for it to be deleted from the map.
+// The channel tracing gRFC specifies that as long as some other trace has a reference to an entity,
+// the trace of the referenced entity must not be deleted. In order to release the resources allocated
+// by grpc, the reference to the grpc object is reset to a dummy object.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool to indicate whether the channel can be safely deleted from map.
+func (sc *SubChannel) deleteSelfFromMap() (delete bool) {
+ return sc.getTraceRefCount() == 0
+}
+
+// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
+// The delete process includes two steps:
+// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
+// its parent's child list.
+// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
+// by id will return entry not found error.
+func (sc *SubChannel) deleteSelfIfReady() {
+ if !sc.deleteSelfFromTree() {
+ return
+ }
+ if !sc.deleteSelfFromMap() {
+ return
+ }
+ db.deleteEntry(sc.ID)
+ sc.trace.clear()
+}
+
+func (sc *SubChannel) getChannelTrace() *ChannelTrace {
+ return sc.trace
+}
+
+func (sc *SubChannel) incrTraceRefCount() {
+ atomic.AddInt32(&sc.traceRefCount, 1)
+}
+
+func (sc *SubChannel) decrTraceRefCount() {
+ atomic.AddInt32(&sc.traceRefCount, -1)
+}
+
+func (sc *SubChannel) getTraceRefCount() int {
+ i := atomic.LoadInt32(&sc.traceRefCount)
+ return int(i)
+}
+
+func (sc *SubChannel) getRefName() string {
+ return sc.RefName
+}
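
Sockets() and Trace() take the global channelz lock and return copies, so callers can inspect a SubChannel without racing against the transport. A short sketch under the same internal-package assumption, reading those copies after registering a subchannel; the channel target and reference names are invented.

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/channelz"
)

func main() {
	channelz.TurnOn()

	parent := channelz.RegisterChannel(nil, "dns:///example.com:443")
	sub := channelz.RegisterSubChannel(parent, "backend-1")

	// Both accessors return copies taken under the channelz lock.
	fmt.Println("sockets:", sub.Sockets())
	tr := sub.Trace()
	fmt.Println("trace created:", tr.CreationTime, "events recorded:", tr.EventNum)

	channelz.RemoveEntry(sub.ID)
	channelz.RemoveEntry(parent.ID)
}
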
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go
similarity index 83%
rename from vendor/google.golang.org/grpc/internal/channelz/types_linux.go
rename to vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go
index 1b1c4cce3..5ac73ff83 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go
@@ -49,3 +49,17 @@ func (s *SocketOptionData) Getsockopt(fd uintptr) {
s.TCPInfo = v
}
}
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(socket any) *SocketOptionData {
+ c, ok := socket.(syscall.Conn)
+ if !ok {
+ return nil
+ }
+ data := &SocketOptionData{}
+ if rawConn, err := c.SyscallConn(); err == nil {
+ rawConn.Control(data.Getsockopt)
+ return data
+ }
+ return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
similarity index 87%
rename from vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
rename to vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
index 8b06eed1a..0e6e18e18 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
@@ -1,5 +1,4 @@
//go:build !linux
-// +build !linux
/*
*
@@ -36,8 +35,13 @@ type SocketOptionData struct {
// Getsockopt defines the function to get socket options requested by channelz.
// It is to be passed to syscall.RawConn.Control().
// Windows OS doesn't support Socket Option
-func (s *SocketOptionData) Getsockopt(fd uintptr) {
+func (s *SocketOptionData) Getsockopt(uintptr) {
once.Do(func() {
logger.Warning("Channelz: socket options are not supported on non-linux environments")
})
}
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(any) *SocketOptionData {
+ return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go
new file mode 100644
index 000000000..2bffe4777
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go
@@ -0,0 +1,213 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "google.golang.org/grpc/grpclog"
+)
+
+const (
+ defaultMaxTraceEntry int32 = 30
+)
+
+var maxTraceEntry = defaultMaxTraceEntry
+
+// SetMaxTraceEntry sets the maximum number of trace entries per entity (i.e.
+// channel/subchannel). Setting it to 0 will disable channel tracing.
+func SetMaxTraceEntry(i int32) {
+ atomic.StoreInt32(&maxTraceEntry, i)
+}
+
+// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per
+// entity to default.
+func ResetMaxTraceEntryToDefault() {
+ atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
+}
+
+func getMaxTraceEntry() int {
+ i := atomic.LoadInt32(&maxTraceEntry)
+ return int(i)
+}
+
+// traceEvent is an internal representation of a single trace event
+type traceEvent struct {
+ // Desc is a simple description of the trace event.
+ Desc string
+ // Severity states the severity of this trace event.
+ Severity Severity
+ // Timestamp is the event time.
+ Timestamp time.Time
+ // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
+ // involved in this event.
+ // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
+ RefID int64
+ // RefName is the reference name for the entity that gets referenced in the event.
+ RefName string
+ // RefType indicates the referenced entity type, i.e Channel or SubChannel.
+ RefType RefChannelType
+}
+
+// TraceEvent is what the caller of AddTraceEvent should provide to describe the
+// event to be added to the channel trace.
+//
+// The Parent field is optional. It is used for an event that will be recorded
+// in the entity's parent trace.
+type TraceEvent struct {
+ Desc string
+ Severity Severity
+ Parent *TraceEvent
+}
+
+// ChannelTrace provides tracing information for a channel.
+// It tracks various events and metadata related to the channel's lifecycle
+// and operations.
+type ChannelTrace struct {
+ cm *channelMap
+ clearCalled bool
+ // The time when the trace was created.
+ CreationTime time.Time
+ // A counter for the number of events recorded in the
+ // trace.
+ EventNum int64
+ mu sync.Mutex
+ // A slice of traceEvent pointers representing the events recorded for
+ // this channel.
+ Events []*traceEvent
+}
+
+func (c *ChannelTrace) copy() *ChannelTrace {
+ return &ChannelTrace{
+ CreationTime: c.CreationTime,
+ EventNum: c.EventNum,
+ Events: append(([]*traceEvent)(nil), c.Events...),
+ }
+}
+
+func (c *ChannelTrace) append(e *traceEvent) {
+ c.mu.Lock()
+ if len(c.Events) == getMaxTraceEntry() {
+ del := c.Events[0]
+ c.Events = c.Events[1:]
+ if del.RefID != 0 {
+ // start recursive cleanup in a goroutine to not block the call originating from grpc.
+ go func() {
+ // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
+ c.cm.mu.Lock()
+ c.cm.decrTraceRefCount(del.RefID)
+ c.cm.mu.Unlock()
+ }()
+ }
+ }
+ e.Timestamp = time.Now()
+ c.Events = append(c.Events, e)
+ c.EventNum++
+ c.mu.Unlock()
+}
+
+func (c *ChannelTrace) clear() {
+ if c.clearCalled {
+ return
+ }
+ c.clearCalled = true
+ c.mu.Lock()
+ for _, e := range c.Events {
+ if e.RefID != 0 {
+ // caller should have already held the c.cm.mu lock.
+ c.cm.decrTraceRefCount(e.RefID)
+ }
+ }
+ c.mu.Unlock()
+}
+
+// Severity is the severity level of a trace event.
+// The canonical enumeration of all valid values is here:
+// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
+type Severity int
+
+const (
+ // CtUnknown indicates unknown severity of a trace event.
+ CtUnknown Severity = iota
+ // CtInfo indicates info level severity of a trace event.
+ CtInfo
+ // CtWarning indicates warning level severity of a trace event.
+ CtWarning
+ // CtError indicates error level severity of a trace event.
+ CtError
+)
+
+// RefChannelType is the type of the entity being referenced in a trace event.
+type RefChannelType int
+
+const (
+ // RefUnknown indicates an unknown entity type, the zero value for this type.
+ RefUnknown RefChannelType = iota
+ // RefChannel indicates the referenced entity is a Channel.
+ RefChannel
+ // RefSubChannel indicates the referenced entity is a SubChannel.
+ RefSubChannel
+ // RefServer indicates the referenced entity is a Server.
+ RefServer
+ // RefListenSocket indicates the referenced entity is a ListenSocket.
+ RefListenSocket
+ // RefNormalSocket indicates the referenced entity is a NormalSocket.
+ RefNormalSocket
+)
+
+var refChannelTypeToString = map[RefChannelType]string{
+ RefUnknown: "Unknown",
+ RefChannel: "Channel",
+ RefSubChannel: "SubChannel",
+ RefServer: "Server",
+ RefListenSocket: "ListenSocket",
+ RefNormalSocket: "NormalSocket",
+}
+
+// String returns a string representation of the RefChannelType
+func (r RefChannelType) String() string {
+ return refChannelTypeToString[r]
+}
+
+// AddTraceEvent adds trace related to the entity with specified id, using the
+// provided TraceEventDesc.
+//
+// If channelz is not turned ON, this will simply log the event descriptions.
+func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) {
+ // Log only the trace description associated with the bottom most entity.
+ d := fmt.Sprintf("[%s]%s", e, desc.Desc)
+ switch desc.Severity {
+ case CtUnknown, CtInfo:
+ l.InfoDepth(depth+1, d)
+ case CtWarning:
+ l.WarningDepth(depth+1, d)
+ case CtError:
+ l.ErrorDepth(depth+1, d)
+ }
+
+ if getMaxTraceEntry() == 0 {
+ return
+ }
+ if IsOn() {
+ db.traceEvent(e.id(), desc)
+ }
+}
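
trace.go keeps the TraceEvent/traceEvent split: the exported TraceEvent describes the event to record (optionally with a Parent event mirrored into the parent's trace), while the unexported traceEvent is what gets stored. A sketch, assuming access to the internal package and a grpclog component logger, of recording one event on a subchannel with a mirrored parent event; the descriptions and names are placeholders.

package main

import (
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/internal/channelz"
)

var logger = grpclog.Component("channelz-demo")

func main() {
	channelz.TurnOn()
	channelz.SetMaxTraceEntry(100) // keep up to 100 trace events per entity

	parent := channelz.RegisterChannel(nil, "dns:///example.com:443")
	sub := channelz.RegisterSubChannel(parent, "backend-1")

	// The outer event lands in sub's trace; the Parent event is appended to
	// parent's trace with RefID/RefName pointing back at sub.
	channelz.AddTraceEvent(logger, sub, 0, &channelz.TraceEvent{
		Desc:     "Subchannel created",
		Severity: channelz.CtInfo,
		Parent: &channelz.TraceEvent{
			Desc:     "Child subchannel created",
			Severity: channelz.CtInfo,
		},
	})
}
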
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
deleted file mode 100644
index 1d4020f53..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/types.go
+++ /dev/null
@@ -1,727 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "net"
- "sync"
- "sync/atomic"
- "time"
-
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/credentials"
-)
-
-// entry represents a node in the channelz database.
-type entry interface {
- // addChild adds a child e, whose channelz id is id to child list
- addChild(id int64, e entry)
- // deleteChild deletes a child with channelz id to be id from child list
- deleteChild(id int64)
- // triggerDelete tries to delete self from channelz database. However, if child
- // list is not empty, then deletion from the database is on hold until the last
- // child is deleted from database.
- triggerDelete()
- // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child
- // list is now empty. If both conditions are met, then delete self from database.
- deleteSelfIfReady()
- // getParentID returns parent ID of the entry. 0 value parent ID means no parent.
- getParentID() int64
-}
-
-// dummyEntry is a fake entry to handle entry not found case.
-type dummyEntry struct {
- idNotFound int64
-}
-
-func (d *dummyEntry) addChild(id int64, e entry) {
- // Note: It is possible for a normal program to reach here under race condition.
- // For example, there could be a race between ClientConn.Close() info being propagated
- // to addrConn and http2Client. ClientConn.Close() cancel the context and result
- // in http2Client to error. The error info is then caught by transport monitor
- // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore,
- // the addrConn will create a new transport. And when registering the new transport in
- // channelz, its parent addrConn could have already been torn down and deleted
- // from channelz tracking, and thus reach the code here.
- logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
-}
-
-func (d *dummyEntry) deleteChild(id int64) {
- // It is possible for a normal program to reach here under race condition.
- // Refer to the example described in addChild().
- logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
-}
-
-func (d *dummyEntry) triggerDelete() {
- logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
-}
-
-func (*dummyEntry) deleteSelfIfReady() {
- // code should not reach here. deleteSelfIfReady is always called on an existing entry.
-}
-
-func (*dummyEntry) getParentID() int64 {
- return 0
-}
-
-// ChannelMetric defines the info channelz provides for a specific Channel, which
-// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
-// child list, etc.
-type ChannelMetric struct {
- // ID is the channelz id of this channel.
- ID int64
- // RefName is the human readable reference string of this channel.
- RefName string
- // ChannelData contains channel internal metric reported by the channel through
- // ChannelzMetric().
- ChannelData *ChannelInternalMetric
- // NestedChans tracks the nested channel type children of this channel in the format of
- // a map from nested channel channelz id to corresponding reference string.
- NestedChans map[int64]string
- // SubChans tracks the subchannel type children of this channel in the format of a
- // map from subchannel channelz id to corresponding reference string.
- SubChans map[int64]string
- // Sockets tracks the socket type children of this channel in the format of a map
- // from socket channelz id to corresponding reference string.
- // Note current grpc implementation doesn't allow channel having sockets directly,
- // therefore, this is field is unused.
- Sockets map[int64]string
- // Trace contains the most recent traced events.
- Trace *ChannelTrace
-}
-
-// SubChannelMetric defines the info channelz provides for a specific SubChannel,
-// which includes ChannelInternalMetric and channelz-specific data, such as
-// channelz id, child list, etc.
-type SubChannelMetric struct {
- // ID is the channelz id of this subchannel.
- ID int64
- // RefName is the human readable reference string of this subchannel.
- RefName string
- // ChannelData contains subchannel internal metric reported by the subchannel
- // through ChannelzMetric().
- ChannelData *ChannelInternalMetric
- // NestedChans tracks the nested channel type children of this subchannel in the format of
- // a map from nested channel channelz id to corresponding reference string.
- // Note current grpc implementation doesn't allow subchannel to have nested channels
- // as children, therefore, this field is unused.
- NestedChans map[int64]string
- // SubChans tracks the subchannel type children of this subchannel in the format of a
- // map from subchannel channelz id to corresponding reference string.
- // Note current grpc implementation doesn't allow subchannel to have subchannels
- // as children, therefore, this field is unused.
- SubChans map[int64]string
- // Sockets tracks the socket type children of this subchannel in the format of a map
- // from socket channelz id to corresponding reference string.
- Sockets map[int64]string
- // Trace contains the most recent traced events.
- Trace *ChannelTrace
-}
-
-// ChannelInternalMetric defines the struct that the implementor of Channel interface
-// should return from ChannelzMetric().
-type ChannelInternalMetric struct {
- // current connectivity state of the channel.
- State connectivity.State
- // The target this channel originally tried to connect to. May be absent
- Target string
- // The number of calls started on the channel.
- CallsStarted int64
- // The number of calls that have completed with an OK status.
- CallsSucceeded int64
- // The number of calls that have a completed with a non-OK status.
- CallsFailed int64
- // The last time a call was started on the channel.
- LastCallStartedTimestamp time.Time
-}
-
-// ChannelTrace stores traced events on a channel/subchannel and related info.
-type ChannelTrace struct {
- // EventNum is the number of events that ever got traced (i.e. including those that have been deleted)
- EventNum int64
- // CreationTime is the creation time of the trace.
- CreationTime time.Time
- // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the
- // oldest one)
- Events []*TraceEvent
-}
-
-// TraceEvent represent a single trace event
-type TraceEvent struct {
- // Desc is a simple description of the trace event.
- Desc string
- // Severity states the severity of this trace event.
- Severity Severity
- // Timestamp is the event time.
- Timestamp time.Time
- // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
- // involved in this event.
- // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
- RefID int64
- // RefName is the reference name for the entity that gets referenced in the event.
- RefName string
- // RefType indicates the referenced entity type, i.e Channel or SubChannel.
- RefType RefChannelType
-}
-
-// Channel is the interface that should be satisfied in order to be tracked by
-// channelz as Channel or SubChannel.
-type Channel interface {
- ChannelzMetric() *ChannelInternalMetric
-}
-
-type dummyChannel struct{}
-
-func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
- return &ChannelInternalMetric{}
-}
-
-type channel struct {
- refName string
- c Channel
- closeCalled bool
- nestedChans map[int64]string
- subChans map[int64]string
- id int64
- pid int64
- cm *channelMap
- trace *channelTrace
- // traceRefCount is the number of trace events that reference this channel.
- // Non-zero traceRefCount means the trace of this channel cannot be deleted.
- traceRefCount int32
-}
-
-func (c *channel) addChild(id int64, e entry) {
- switch v := e.(type) {
- case *subChannel:
- c.subChans[id] = v.refName
- case *channel:
- c.nestedChans[id] = v.refName
- default:
- logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
- }
-}
-
-func (c *channel) deleteChild(id int64) {
- delete(c.subChans, id)
- delete(c.nestedChans, id)
- c.deleteSelfIfReady()
-}
-
-func (c *channel) triggerDelete() {
- c.closeCalled = true
- c.deleteSelfIfReady()
-}
-
-func (c *channel) getParentID() int64 {
- return c.pid
-}
-
-// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
-// deleting the channel reference from its parent's child list.
-//
-// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the
-// corresponding grpc object has been invoked, and the channel does not have any children left.
-//
-// The returned boolean value indicates whether the channel has been successfully deleted from tree.
-func (c *channel) deleteSelfFromTree() (deleted bool) {
- if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
- return false
- }
- // not top channel
- if c.pid != 0 {
- c.cm.findEntry(c.pid).deleteChild(c.id)
- }
- return true
-}
-
-// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
-// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
-// channel, and its memory will be garbage collected.
-//
-// The trace reference count of the channel must be 0 in order to be deleted from the map. This is
-// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
-// the trace of the referenced entity must not be deleted. In order to release the resource allocated
-// by grpc, the reference to the grpc object is reset to a dummy object.
-//
-// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
-//
-// It returns a bool to indicate whether the channel can be safely deleted from map.
-func (c *channel) deleteSelfFromMap() (delete bool) {
- if c.getTraceRefCount() != 0 {
- c.c = &dummyChannel{}
- return false
- }
- return true
-}
-
-// deleteSelfIfReady tries to delete the channel itself from the channelz database.
-// The delete process includes two steps:
-// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
-// parent's child list.
-// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
-// will return entry not found error.
-func (c *channel) deleteSelfIfReady() {
- if !c.deleteSelfFromTree() {
- return
- }
- if !c.deleteSelfFromMap() {
- return
- }
- c.cm.deleteEntry(c.id)
- c.trace.clear()
-}
-
-func (c *channel) getChannelTrace() *channelTrace {
- return c.trace
-}
-
-func (c *channel) incrTraceRefCount() {
- atomic.AddInt32(&c.traceRefCount, 1)
-}
-
-func (c *channel) decrTraceRefCount() {
- atomic.AddInt32(&c.traceRefCount, -1)
-}
-
-func (c *channel) getTraceRefCount() int {
- i := atomic.LoadInt32(&c.traceRefCount)
- return int(i)
-}
-
-func (c *channel) getRefName() string {
- return c.refName
-}
-
-type subChannel struct {
- refName string
- c Channel
- closeCalled bool
- sockets map[int64]string
- id int64
- pid int64
- cm *channelMap
- trace *channelTrace
- traceRefCount int32
-}
-
-func (sc *subChannel) addChild(id int64, e entry) {
- if v, ok := e.(*normalSocket); ok {
- sc.sockets[id] = v.refName
- } else {
- logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
- }
-}
-
-func (sc *subChannel) deleteChild(id int64) {
- delete(sc.sockets, id)
- sc.deleteSelfIfReady()
-}
-
-func (sc *subChannel) triggerDelete() {
- sc.closeCalled = true
- sc.deleteSelfIfReady()
-}
-
-func (sc *subChannel) getParentID() int64 {
- return sc.pid
-}
-
-// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
-// means deleting the subchannel reference from its parent's child list.
-//
-// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of
-// the corresponding grpc object has been invoked, and the subchannel does not have any children left.
-//
-// The returned boolean value indicates whether the channel has been successfully deleted from tree.
-func (sc *subChannel) deleteSelfFromTree() (deleted bool) {
- if !sc.closeCalled || len(sc.sockets) != 0 {
- return false
- }
- sc.cm.findEntry(sc.pid).deleteChild(sc.id)
- return true
-}
-
-// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
-// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
-// the subchannel, and its memory will be garbage collected.
-//
-// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is
-// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
-// the trace of the referenced entity must not be deleted. In order to release the resource allocated
-// by grpc, the reference to the grpc object is reset to a dummy object.
-//
-// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
-//
-// It returns a bool to indicate whether the channel can be safely deleted from map.
-func (sc *subChannel) deleteSelfFromMap() (delete bool) {
- if sc.getTraceRefCount() != 0 {
- // free the grpc struct (i.e. addrConn)
- sc.c = &dummyChannel{}
- return false
- }
- return true
-}
-
-// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
-// The delete process includes two steps:
-// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
-// its parent's child list.
-// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
-// by id will return entry not found error.
-func (sc *subChannel) deleteSelfIfReady() {
- if !sc.deleteSelfFromTree() {
- return
- }
- if !sc.deleteSelfFromMap() {
- return
- }
- sc.cm.deleteEntry(sc.id)
- sc.trace.clear()
-}
-
-func (sc *subChannel) getChannelTrace() *channelTrace {
- return sc.trace
-}
-
-func (sc *subChannel) incrTraceRefCount() {
- atomic.AddInt32(&sc.traceRefCount, 1)
-}
-
-func (sc *subChannel) decrTraceRefCount() {
- atomic.AddInt32(&sc.traceRefCount, -1)
-}
-
-func (sc *subChannel) getTraceRefCount() int {
- i := atomic.LoadInt32(&sc.traceRefCount)
- return int(i)
-}
-
-func (sc *subChannel) getRefName() string {
- return sc.refName
-}
-
-// SocketMetric defines the info channelz provides for a specific Socket, which
-// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
-type SocketMetric struct {
- // ID is the channelz id of this socket.
- ID int64
- // RefName is the human readable reference string of this socket.
- RefName string
- // SocketData contains socket internal metric reported by the socket through
- // ChannelzMetric().
- SocketData *SocketInternalMetric
-}
-
-// SocketInternalMetric defines the struct that the implementor of Socket interface
-// should return from ChannelzMetric().
-type SocketInternalMetric struct {
- // The number of streams that have been started.
- StreamsStarted int64
- // The number of streams that have ended successfully:
- // On client side, receiving frame with eos bit set.
- // On server side, sending frame with eos bit set.
- StreamsSucceeded int64
- // The number of streams that have ended unsuccessfully:
- // On client side, termination without receiving frame with eos bit set.
- // On server side, termination without sending frame with eos bit set.
- StreamsFailed int64
- // The number of messages successfully sent on this socket.
- MessagesSent int64
- MessagesReceived int64
- // The number of keep alives sent. This is typically implemented with HTTP/2
- // ping messages.
- KeepAlivesSent int64
- // The last time a stream was created by this endpoint. Usually unset for
- // servers.
- LastLocalStreamCreatedTimestamp time.Time
- // The last time a stream was created by the remote endpoint. Usually unset
- // for clients.
- LastRemoteStreamCreatedTimestamp time.Time
- // The last time a message was sent by this endpoint.
- LastMessageSentTimestamp time.Time
- // The last time a message was received by this endpoint.
- LastMessageReceivedTimestamp time.Time
- // The amount of window, granted to the local endpoint by the remote endpoint.
- // This may be slightly out of date due to network latency. This does NOT
- // include stream level or TCP level flow control info.
- LocalFlowControlWindow int64
- // The amount of window, granted to the remote endpoint by the local endpoint.
- // This may be slightly out of date due to network latency. This does NOT
- // include stream level or TCP level flow control info.
- RemoteFlowControlWindow int64
- // The locally bound address.
- LocalAddr net.Addr
- // The remote bound address. May be absent.
- RemoteAddr net.Addr
- // Optional, represents the name of the remote endpoint, if different than
- // the original target name.
- RemoteName string
- SocketOptions *SocketOptionData
- Security credentials.ChannelzSecurityValue
-}
-
-// Socket is the interface that should be satisfied in order to be tracked by
-// channelz as Socket.
-type Socket interface {
- ChannelzMetric() *SocketInternalMetric
-}
-
-type listenSocket struct {
- refName string
- s Socket
- id int64
- pid int64
- cm *channelMap
-}
-
-func (ls *listenSocket) addChild(id int64, e entry) {
- logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
-}
-
-func (ls *listenSocket) deleteChild(id int64) {
- logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
-}
-
-func (ls *listenSocket) triggerDelete() {
- ls.cm.deleteEntry(ls.id)
- ls.cm.findEntry(ls.pid).deleteChild(ls.id)
-}
-
-func (ls *listenSocket) deleteSelfIfReady() {
- logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
-}
-
-func (ls *listenSocket) getParentID() int64 {
- return ls.pid
-}
-
-type normalSocket struct {
- refName string
- s Socket
- id int64
- pid int64
- cm *channelMap
-}
-
-func (ns *normalSocket) addChild(id int64, e entry) {
- logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
-}
-
-func (ns *normalSocket) deleteChild(id int64) {
- logger.Errorf("cannot delete a child (id = %d) from a normal socket", id)
-}
-
-func (ns *normalSocket) triggerDelete() {
- ns.cm.deleteEntry(ns.id)
- ns.cm.findEntry(ns.pid).deleteChild(ns.id)
-}
-
-func (ns *normalSocket) deleteSelfIfReady() {
- logger.Errorf("cannot call deleteSelfIfReady on a normal socket")
-}
-
-func (ns *normalSocket) getParentID() int64 {
- return ns.pid
-}
-
-// ServerMetric defines the info channelz provides for a specific Server, which
-// includes ServerInternalMetric and channelz-specific data, such as channelz id,
-// child list, etc.
-type ServerMetric struct {
- // ID is the channelz id of this server.
- ID int64
- // RefName is the human readable reference string of this server.
- RefName string
- // ServerData contains server internal metric reported by the server through
- // ChannelzMetric().
- ServerData *ServerInternalMetric
- // ListenSockets tracks the listener socket type children of this server in the
- // format of a map from socket channelz id to corresponding reference string.
- ListenSockets map[int64]string
-}
-
-// ServerInternalMetric defines the struct that the implementor of Server interface
-// should return from ChannelzMetric().
-type ServerInternalMetric struct {
- // The number of incoming calls started on the server.
- CallsStarted int64
- // The number of incoming calls that have completed with an OK status.
- CallsSucceeded int64
- // The number of incoming calls that have a completed with a non-OK status.
- CallsFailed int64
- // The last time a call was started on the server.
- LastCallStartedTimestamp time.Time
-}
-
-// Server is the interface to be satisfied in order to be tracked by channelz as
-// Server.
-type Server interface {
- ChannelzMetric() *ServerInternalMetric
-}
-
-type server struct {
- refName string
- s Server
- closeCalled bool
- sockets map[int64]string
- listenSockets map[int64]string
- id int64
- cm *channelMap
-}
-
-func (s *server) addChild(id int64, e entry) {
- switch v := e.(type) {
- case *normalSocket:
- s.sockets[id] = v.refName
- case *listenSocket:
- s.listenSockets[id] = v.refName
- default:
- logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
- }
-}
-
-func (s *server) deleteChild(id int64) {
- delete(s.sockets, id)
- delete(s.listenSockets, id)
- s.deleteSelfIfReady()
-}
-
-func (s *server) triggerDelete() {
- s.closeCalled = true
- s.deleteSelfIfReady()
-}
-
-func (s *server) deleteSelfIfReady() {
- if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
- return
- }
- s.cm.deleteEntry(s.id)
-}
-
-func (s *server) getParentID() int64 {
- return 0
-}
-
-type tracedChannel interface {
- getChannelTrace() *channelTrace
- incrTraceRefCount()
- decrTraceRefCount()
- getRefName() string
-}
-
-type channelTrace struct {
- cm *channelMap
- clearCalled bool
- createdTime time.Time
- eventCount int64
- mu sync.Mutex
- events []*TraceEvent
-}
-
-func (c *channelTrace) append(e *TraceEvent) {
- c.mu.Lock()
- if len(c.events) == getMaxTraceEntry() {
- del := c.events[0]
- c.events = c.events[1:]
- if del.RefID != 0 {
- // start recursive cleanup in a goroutine to not block the call originated from grpc.
- go func() {
- // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
- c.cm.mu.Lock()
- c.cm.decrTraceRefCount(del.RefID)
- c.cm.mu.Unlock()
- }()
- }
- }
- e.Timestamp = time.Now()
- c.events = append(c.events, e)
- c.eventCount++
- c.mu.Unlock()
-}
-
-func (c *channelTrace) clear() {
- if c.clearCalled {
- return
- }
- c.clearCalled = true
- c.mu.Lock()
- for _, e := range c.events {
- if e.RefID != 0 {
- // caller should have already held the c.cm.mu lock.
- c.cm.decrTraceRefCount(e.RefID)
- }
- }
- c.mu.Unlock()
-}
-
-// Severity is the severity level of a trace event.
-// The canonical enumeration of all valid values is here:
-// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
-type Severity int
-
-const (
- // CtUnknown indicates unknown severity of a trace event.
- CtUnknown Severity = iota
- // CtInfo indicates info level severity of a trace event.
- CtInfo
- // CtWarning indicates warning level severity of a trace event.
- CtWarning
- // CtError indicates error level severity of a trace event.
- CtError
-)
-
-// RefChannelType is the type of the entity being referenced in a trace event.
-type RefChannelType int
-
-const (
- // RefUnknown indicates an unknown entity type, the zero value for this type.
- RefUnknown RefChannelType = iota
- // RefChannel indicates the referenced entity is a Channel.
- RefChannel
- // RefSubChannel indicates the referenced entity is a SubChannel.
- RefSubChannel
- // RefServer indicates the referenced entity is a Server.
- RefServer
- // RefListenSocket indicates the referenced entity is a ListenSocket.
- RefListenSocket
- // RefNormalSocket indicates the referenced entity is a NormalSocket.
- RefNormalSocket
-)
-
-var refChannelTypeToString = map[RefChannelType]string{
- RefUnknown: "Unknown",
- RefChannel: "Channel",
- RefSubChannel: "SubChannel",
- RefServer: "Server",
- RefListenSocket: "ListenSocket",
- RefNormalSocket: "NormalSocket",
-}
-
-func (r RefChannelType) String() string {
- return refChannelTypeToString[r]
-}
-
-func (c *channelTrace) dumpData() *ChannelTrace {
- c.mu.Lock()
- ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
- ct.Events = c.events[:len(c.events)]
- c.mu.Unlock()
- return ct
-}
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 3cf10ddfb..6e7dd6b77 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -28,17 +28,11 @@ import (
var (
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
- // AdvertiseCompressors is set if registered compressor should be advertised
- // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false").
- AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true)
// RingHashCap indicates the maximum ring size which defaults to 4096
// entries but may be overridden by setting the environment variable
// "GRPC_RING_HASH_CAP". This does not override the default bounds
// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
- // PickFirstLBConfig is set if we should support configuration of the
- // pick_first LB policy.
- PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true)
// LeastRequestLB is set if we should support the least_request_experimental
// LB policy, which can be enabled by setting the environment variable
// "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true".
@@ -46,6 +40,21 @@ var (
// ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
// handshakes that can be performed.
ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
+ // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled
+ // should be rejected. The HTTP/2 protocol requires ALPN to be enabled; this
+ // option is present for backward compatibility. This option may be overridden
+ // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
+ // or "false".
+ EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
+ // XDSFallbackSupport is the env variable that controls whether support for
+ // xDS fallback is turned on. If this is unset or is false, only the first
+ // xDS server in the list of server configs will be used.
+ XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)
+ // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
+ // instead of the existing pickfirst implementation. This can be enabled by
+ // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
+ // to "true".
+ NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false)
)
func boolFromEnv(envVar string, def bool) bool {
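
Each of the newly added knobs (EnforceALPNEnabled, XDSFallbackSupport, NewPickFirstEnabled) is read once at package initialization through boolFromEnv above. A rough, standard-library-only sketch of that read-once pattern; the exact upstream parsing rules may differ:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// boolFromEnvSketch returns the default when the variable is unset or cannot
// be parsed as a boolean.
func boolFromEnvSketch(envVar string, def bool) bool {
	v, ok := os.LookupEnv(envVar)
	if !ok {
		return def
	}
	b, err := strconv.ParseBool(v)
	if err != nil {
		return def
	}
	return b
}

func main() {
	os.Setenv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", "true")
	fmt.Println(boolFromEnvSketch("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false)) // true
}
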
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index 02b4b6a1c..29f234acb 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -50,46 +50,7 @@ var (
//
// When both bootstrap FileName and FileContent are set, FileName is used.
XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
- // XDSRingHash indicates whether ring hash support is enabled, which can be
- // disabled by setting the environment variable
- // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
- XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true)
- // XDSClientSideSecurity is used to control processing of security
- // configuration on the client-side.
- //
- // Note that there is no env var protection for the server-side because we
- // have a brand new API on the server-side and users explicitly need to use
- // the new API to get security integration on the server.
- XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
- // XDSAggregateAndDNS indicates whether processing of aggregated cluster and
- // DNS cluster is enabled, which can be disabled by setting the environment
- // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
- // to "false".
- XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)
-
- // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
- // which can be disabled by setting the environment variable
- // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
- XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true)
- // XDSOutlierDetection indicates whether outlier detection support is
- // enabled, which can be disabled by setting the environment variable
- // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
- XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true)
- // XDSFederation indicates whether federation support is enabled, which can
- // be enabled by setting the environment variable
- // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true".
- XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true)
-
- // XDSRLS indicates whether processing of Cluster Specifier plugins and
- // support for the RLS CLuster Specifier is enabled, which can be disabled by
- // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
- // "false".
- XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true)
// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
- // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which
- // can be disabled by setting the environment variable
- // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false".
- XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true)
)
diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
new file mode 100644
index 000000000..7617be215
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/experimental.go
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+var (
+ // WithBufferPool is implemented by the grpc package and returns a dial
+ // option to configure a shared buffer pool for a grpc.ClientConn.
+ WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
+
+ // BufferPool is implemented by the grpc package and returns a server
+ // option to configure a shared buffer pool for a grpc.Server.
+ BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
+)
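
The new experimental.go relies on the internal package's usual technique of exposing hooks as untyped any variables that the top-level grpc package assigns during init, which avoids an import cycle. A self-contained, hypothetical illustration of the same pattern (all names here are invented):

package main

import "fmt"

// hook is declared where the concrete types are not visible; the package
// that owns them assigns a function of the documented signature at init.
var hook any // func(size int) string

func init() {
	hook = func(size int) string { return fmt.Sprintf("buffer pool of %d bytes", size) }
}

func main() {
	// Callers type-assert to the documented signature before use.
	fmt.Println(hook.(func(int) string)(4096))
}
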
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
similarity index 63%
rename from vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
rename to vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
index faa998de7..092ad187a 100644
--- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go
+++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
@@ -16,17 +16,21 @@
*
*/
+// Package grpclog provides logging functionality for internal gRPC packages,
+// outside of the functionality provided by the external `grpclog` package.
package grpclog
import (
"fmt"
+
+ "google.golang.org/grpc/grpclog"
)
// PrefixLogger does logging with a prefix.
//
// Logging method on a nil logs without any prefix.
type PrefixLogger struct {
- logger DepthLoggerV2
+ logger grpclog.DepthLoggerV2
prefix string
}
@@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) {
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
return
}
- InfoDepth(1, fmt.Sprintf(format, args...))
+ grpclog.InfoDepth(1, fmt.Sprintf(format, args...))
}
// Warningf does warning logging.
@@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) {
pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
return
}
- WarningDepth(1, fmt.Sprintf(format, args...))
+ grpclog.WarningDepth(1, fmt.Sprintf(format, args...))
}
// Errorf does error logging.
@@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) {
pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
return
}
- ErrorDepth(1, fmt.Sprintf(format, args...))
-}
-
-// Debugf does info logging at verbose level 2.
-func (pl *PrefixLogger) Debugf(format string, args ...any) {
- // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
- // rewrite PrefixLogger a little to ensure that we don't use the global
- // `Logger` here, and instead use the `logger` field.
- if !Logger.V(2) {
- return
- }
- if pl != nil {
- // Handle nil, so the tests can pass in a nil logger.
- format = pl.prefix + format
- pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
- return
- }
- InfoDepth(1, fmt.Sprintf(format, args...))
-
+ grpclog.ErrorDepth(1, fmt.Sprintf(format, args...))
}
// V reports whether verbosity level l is at least the requested verbose level.
func (pl *PrefixLogger) V(l int) bool {
- // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
- // rewrite PrefixLogger a little to ensure that we don't use the global
- // `Logger` here, and instead use the `logger` field.
- return Logger.V(l)
+ if pl != nil {
+ return pl.logger.V(l)
+ }
+ return true
}
// NewPrefixLogger creates a prefix logger with the given prefix.
-func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
+func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger {
return &PrefixLogger{logger: logger, prefix: prefix}
}
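
After this rename, the internal PrefixLogger is built directly on the exported grpclog.DepthLoggerV2, so Debugf is gone and V delegates to the wrapped logger. Inside the grpc module a component would typically construct one roughly like this (component name and prefix are illustrative):

package somecomponent

import (
	"google.golang.org/grpc/grpclog"
	internalgrpclog "google.golang.org/grpc/internal/grpclog"
)

// Every line logged through this logger carries the prefix, which makes
// per-component output easy to filter.
var logger = internalgrpclog.NewPrefixLogger(grpclog.Component("core"), "[somecomponent] ")

func dial(target string) {
	logger.Infof("dialing %s", target)
	if logger.V(2) {
		logger.Infof("verbose connection details for %s", target)
	}
}
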
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
deleted file mode 100644
index aa97273e7..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpcrand implements math/rand functions in a concurrent-safe way
-// with a global random source, independent of math/rand's global source.
-package grpcrand
-
-import (
- "math/rand"
- "sync"
- "time"
-)
-
-var (
- r = rand.New(rand.NewSource(time.Now().UnixNano()))
- mu sync.Mutex
-)
-
-// Int implements rand.Int on the grpcrand global source.
-func Int() int {
- mu.Lock()
- defer mu.Unlock()
- return r.Int()
-}
-
-// Int63n implements rand.Int63n on the grpcrand global source.
-func Int63n(n int64) int64 {
- mu.Lock()
- defer mu.Unlock()
- return r.Int63n(n)
-}
-
-// Intn implements rand.Intn on the grpcrand global source.
-func Intn(n int) int {
- mu.Lock()
- defer mu.Unlock()
- return r.Intn(n)
-}
-
-// Int31n implements rand.Int31n on the grpcrand global source.
-func Int31n(n int32) int32 {
- mu.Lock()
- defer mu.Unlock()
- return r.Int31n(n)
-}
-
-// Float64 implements rand.Float64 on the grpcrand global source.
-func Float64() float64 {
- mu.Lock()
- defer mu.Unlock()
- return r.Float64()
-}
-
-// Uint64 implements rand.Uint64 on the grpcrand global source.
-func Uint64() uint64 {
- mu.Lock()
- defer mu.Unlock()
- return r.Uint64()
-}
-
-// Uint32 implements rand.Uint32 on the grpcrand global source.
-func Uint32() uint32 {
- mu.Lock()
- defer mu.Unlock()
- return r.Uint32()
-}
-
-// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source.
-func ExpFloat64() float64 {
- mu.Lock()
- defer mu.Unlock()
- return r.ExpFloat64()
-}
-
-// Shuffle implements rand.Shuffle on the grpcrand global source.
-var Shuffle = func(n int, f func(int, int)) {
- mu.Lock()
- defer mu.Unlock()
- r.Shuffle(n, f)
-}
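
The grpcrand wrapper is deleted outright; call sites presumably switch to math/rand directly, whose top-level functions are safe for concurrent use and, on modern Go versions, seeded automatically. A minimal sketch of the direct replacement:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// Stand-ins for the removed grpcrand.Intn and grpcrand.Float64 helpers.
	fmt.Println(rand.Intn(10))
	fmt.Println(rand.Float64())
}
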
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index 900917dbe..8e8e86128 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -20,7 +20,6 @@ package grpcsync
import (
"context"
- "sync"
"google.golang.org/grpc/internal/buffer"
)
@@ -38,8 +37,6 @@ type CallbackSerializer struct {
done chan struct{}
callbacks *buffer.Unbounded
- closedMu sync.Mutex
- closed bool
}
// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
@@ -56,65 +53,55 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
return cs
}
-// Schedule adds a callback to be scheduled after existing callbacks are run.
+// TrySchedule tries to schedule the provided callback function f to be
+// executed in the order it was added. This is a best-effort operation. If the
+// context passed to NewCallbackSerializer was canceled before this method is
+// called, the callback will not be scheduled.
//
// Callbacks are expected to honor the context when performing any blocking
// operations, and should return early when the context is canceled.
-//
-// Return value indicates if the callback was successfully added to the list of
-// callbacks to be executed by the serializer. It is not possible to add
-// callbacks once the context passed to NewCallbackSerializer is cancelled.
-func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
- cs.closedMu.Lock()
- defer cs.closedMu.Unlock()
+func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
+ cs.callbacks.Put(f)
+}
- if cs.closed {
- return false
+// ScheduleOr schedules the provided callback function f to be executed in the
+// order it was added. If the context passed to NewCallbackSerializer has been
+// canceled before this method is called, the onFailure callback will be
+// executed inline instead.
+//
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) {
+ if cs.callbacks.Put(f) != nil {
+ onFailure()
}
- cs.callbacks.Put(f)
- return true
}
func (cs *CallbackSerializer) run(ctx context.Context) {
- var backlog []func(context.Context)
-
defer close(cs.done)
+
+ // TODO: when Go 1.21 is the oldest supported version, this loop and Close
+ // can be replaced with:
+ //
+ // context.AfterFunc(ctx, cs.callbacks.Close)
for ctx.Err() == nil {
select {
case <-ctx.Done():
// Do nothing here. Next iteration of the for loop will not happen,
// since ctx.Err() would be non-nil.
- case callback, ok := <-cs.callbacks.Get():
- if !ok {
- return
- }
+ case cb := <-cs.callbacks.Get():
cs.callbacks.Load()
- callback.(func(ctx context.Context))(ctx)
+ cb.(func(context.Context))(ctx)
}
}
- // Fetch pending callbacks if any, and execute them before returning from
- // this method and closing cs.done.
- cs.closedMu.Lock()
- cs.closed = true
- backlog = cs.fetchPendingCallbacks()
+ // Close the buffer to prevent new callbacks from being added.
cs.callbacks.Close()
- cs.closedMu.Unlock()
- for _, b := range backlog {
- b(ctx)
- }
-}
-func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) {
- var backlog []func(context.Context)
- for {
- select {
- case b := <-cs.callbacks.Get():
- backlog = append(backlog, b.(func(context.Context)))
- cs.callbacks.Load()
- default:
- return backlog
- }
+ // Run all pending callbacks.
+ for cb := range cs.callbacks.Get() {
+ cs.callbacks.Load()
+ cb.(func(context.Context))(ctx)
}
}
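
Schedule's boolean return value is replaced by two entry points: best-effort TrySchedule, and ScheduleOr, which runs a failure callback inline when the serializer can no longer accept work. A sketch of how a component inside the grpc module might use them (the helper and callback names are invented):

package somecomponent

import (
	"context"

	"google.golang.org/grpc/internal/grpcsync"
)

// startWatcher shows both calls: TrySchedule silently drops the callback if
// the serializer's context was already canceled, while ScheduleOr invokes
// onClosed inline in that case.
func startWatcher(ctx context.Context, onClosed func()) *grpcsync.CallbackSerializer {
	cs := grpcsync.NewCallbackSerializer(ctx)

	cs.TrySchedule(func(ctx context.Context) {
		// Deliver the initial update.
	})

	cs.ScheduleOr(func(ctx context.Context) {
		// Deliver a later update.
	}, onClosed)

	return cs
}
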
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
index aef8cec1a..6d8c2f518 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
@@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
if ps.msg != nil {
msg := ps.msg
- ps.cs.Schedule(func(context.Context) {
+ ps.cs.TrySchedule(func(context.Context) {
ps.mu.Lock()
defer ps.mu.Unlock()
if !ps.subscribers[sub] {
@@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) {
ps.msg = msg
for sub := range ps.subscribers {
s := sub
- ps.cs.Schedule(func(context.Context) {
+ ps.cs.TrySchedule(func(context.Context) {
ps.mu.Lock()
defer ps.mu.Unlock()
if !ps.subscribers[s] {
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
index 9f4090967..e8d866984 100644
--- a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
@@ -20,8 +20,6 @@ package grpcutil
import (
"strings"
-
- "google.golang.org/grpc/internal/envconfig"
)
// RegisteredCompressorNames holds names of the registered compressors.
@@ -40,8 +38,5 @@ func IsCompressorNameRegistered(name string) bool {
// RegisteredCompressors returns a string of registered compressor names
// separated by comma.
func RegisteredCompressors() string {
- if !envconfig.AdvertiseCompressors {
- return ""
- }
return strings.Join(RegisteredCompressorNames, ",")
}
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
index ec62b4775..683d1955c 100644
--- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
@@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) {
}
// baseContentType is the base content-type for gRPC. This is a valid
-// content-type on it's own, but can also include a content-subtype such as
+// content-type on its own, but can also include a content-subtype such as
// "proto" as a suffix after "+" or ";". See
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
// for more details.
diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go
index 6c272476e..2c13ee9da 100644
--- a/vendor/google.golang.org/grpc/internal/idle/idle.go
+++ b/vendor/google.golang.org/grpc/internal/idle/idle.go
@@ -26,8 +26,6 @@ import (
"sync"
"sync/atomic"
"time"
-
- "google.golang.org/grpc/grpclog"
)
// For overriding in unit tests.
@@ -39,27 +37,12 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
// and exit from idle mode.
type Enforcer interface {
ExitIdleMode() error
- EnterIdleMode() error
-}
-
-// Manager defines the functionality required to track RPC activity on a
-// channel.
-type Manager interface {
- OnCallBegin() error
- OnCallEnd()
- Close()
+ EnterIdleMode()
}
-type noopManager struct{}
-
-func (noopManager) OnCallBegin() error { return nil }
-func (noopManager) OnCallEnd() {}
-func (noopManager) Close() {}
-
-// manager implements the Manager interface. It uses atomic operations to
-// synchronize access to shared state and a mutex to guarantee mutual exclusion
-// in a critical section.
-type manager struct {
+// Manager implements idleness detection and calls the configured Enforcer to
+// enter/exit idle mode when appropriate. Must be created by NewManager.
+type Manager struct {
// State accessed atomically.
lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed.
activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
@@ -69,8 +52,7 @@ type manager struct {
// Can be accessed without atomics or mutex since these are set at creation
// time and read-only after that.
enforcer Enforcer // Functionality provided by grpc.ClientConn.
- timeout int64 // Idle timeout duration nanos stored as an int64.
- logger grpclog.LoggerV2
+ timeout time.Duration
// idleMu is used to guarantee mutual exclusion in two scenarios:
// - Opposing intentions:
@@ -88,57 +70,48 @@ type manager struct {
timer *time.Timer
}
-// ManagerOptions is a collection of options used by
-// NewManager.
-type ManagerOptions struct {
- Enforcer Enforcer
- Timeout time.Duration
- Logger grpclog.LoggerV2
+// NewManager creates a new idleness manager implementation for the
+// given idle timeout. It begins in idle mode.
+func NewManager(enforcer Enforcer, timeout time.Duration) *Manager {
+ return &Manager{
+ enforcer: enforcer,
+ timeout: timeout,
+ actuallyIdle: true,
+ activeCallsCount: -math.MaxInt32,
+ }
}
-// NewManager creates a new idleness manager implementation for the
-// given idle timeout.
-func NewManager(opts ManagerOptions) Manager {
- if opts.Timeout == 0 {
- return noopManager{}
+// resetIdleTimerLocked resets the idle timer to the given duration. Called
+// when exiting idle mode or when the timer fires and we need to reset it.
+func (m *Manager) resetIdleTimerLocked(d time.Duration) {
+ if m.isClosed() || m.timeout == 0 || m.actuallyIdle {
+ return
}
- m := &manager{
- enforcer: opts.Enforcer,
- timeout: int64(opts.Timeout),
- logger: opts.Logger,
+ // Stop any existing timer before arming a new one. This method is only
+ // ever called from the timer callback or when exiting idle mode.
+ if m.timer != nil {
+ m.timer.Stop()
}
- m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout)
- return m
+ m.timer = timeAfterFunc(d, m.handleIdleTimeout)
}
-// resetIdleTimer resets the idle timer to the given duration. This method
-// should only be called from the timer callback.
-func (m *manager) resetIdleTimer(d time.Duration) {
+func (m *Manager) resetIdleTimer(d time.Duration) {
m.idleMu.Lock()
defer m.idleMu.Unlock()
-
- if m.timer == nil {
- // Only close sets timer to nil. We are done.
- return
- }
-
- // It is safe to ignore the return value from Reset() because this method is
- // only ever called from the timer callback, which means the timer has
- // already fired.
- m.timer.Reset(d)
+ m.resetIdleTimerLocked(d)
}
// handleIdleTimeout is the timer callback that is invoked upon expiry of the
// configured idle timeout. The channel is considered inactive if there are no
// ongoing calls and no RPC activity since the last time the timer fired.
-func (m *manager) handleIdleTimeout() {
+func (m *Manager) handleIdleTimeout() {
if m.isClosed() {
return
}
if atomic.LoadInt32(&m.activeCallsCount) > 0 {
- m.resetIdleTimer(time.Duration(m.timeout))
+ m.resetIdleTimer(m.timeout)
return
}
@@ -148,24 +121,12 @@ func (m *manager) handleIdleTimeout() {
// Set the timer to fire after a duration of idle timeout, calculated
// from the time the most recent RPC completed.
atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0)
- m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano()))
+ m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout)
return
}
- // This CAS operation is extremely likely to succeed given that there has
- // been no activity since the last time we were here. Setting the
- // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the
- // channel is either in idle mode or is trying to get there.
- if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
- // This CAS operation can fail if an RPC started after we checked for
- // activity at the top of this method, or one was ongoing from before
- // the last time we were here. In both case, reset the timer and return.
- m.resetIdleTimer(time.Duration(m.timeout))
- return
- }
-
- // Now that we've set the active calls count to -math.MaxInt32, it's time to
- // actually move to idle mode.
+ // Now that we've checked that there has been no activity, attempt to enter
+ // idle mode, which is very likely to succeed.
if m.tryEnterIdleMode() {
// Successfully entered idle mode. No timer needed until we exit idle.
return
@@ -174,8 +135,7 @@ func (m *manager) handleIdleTimeout() {
// Failed to enter idle mode due to a concurrent RPC that kept the channel
// active, or because of an error from the channel. Undo the attempt to
// enter idle, and reset the timer to try again later.
- atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
- m.resetIdleTimer(time.Duration(m.timeout))
+ m.resetIdleTimer(m.timeout)
}
// tryEnterIdleMode instructs the channel to enter idle mode. But before
@@ -185,36 +145,50 @@ func (m *manager) handleIdleTimeout() {
// Return value indicates whether or not the channel moved to idle mode.
//
// Holds idleMu which ensures mutual exclusion with exitIdleMode.
-func (m *manager) tryEnterIdleMode() bool {
+func (m *Manager) tryEnterIdleMode() bool {
+ // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin()
+ // that the channel is either in idle mode or is trying to get there.
+ if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
+ // This CAS operation can fail if an RPC started after we checked for
+ // activity in the timer handler, or one was ongoing from before the
+ // last time the timer fired, or if a test is attempting to enter idle
+ // mode without checking. In all cases, abort going into idle mode.
+ return false
+ }
+ // N.B. if we fail to enter idle mode after this, we must re-add
+ // math.MaxInt32 to m.activeCallsCount.
+
m.idleMu.Lock()
defer m.idleMu.Unlock()
if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 {
// We raced and lost to a new RPC. Very rare, but stop entering idle.
+ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
return false
}
if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
- // An very short RPC could have come in (and also finished) after we
+ // A very short RPC could have come in (and also finished) after we
// checked for calls count and activity in handleIdleTimeout(), but
// before the CAS operation. So, we need to check for activity again.
+ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
return false
}
- // No new RPCs have come in since we last set the active calls count value
- // -math.MaxInt32 in the timer callback. And since we have the lock, it is
- // safe to enter idle mode now.
- if err := m.enforcer.EnterIdleMode(); err != nil {
- m.logger.Errorf("Failed to enter idle mode: %v", err)
- return false
- }
-
- // Successfully entered idle mode.
+ // No new RPCs have come in since we set the active calls count value to
+ // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode
+ // unconditionally now.
+ m.enforcer.EnterIdleMode()
m.actuallyIdle = true
return true
}
+// EnterIdleModeForTesting instructs the channel to enter idle mode.
+func (m *Manager) EnterIdleModeForTesting() {
+ m.tryEnterIdleMode()
+}
+
// OnCallBegin is invoked at the start of every RPC.
-func (m *manager) OnCallBegin() error {
+func (m *Manager) OnCallBegin() error {
if m.isClosed() {
return nil
}
@@ -227,7 +201,7 @@ func (m *manager) OnCallBegin() error {
// Channel is either in idle mode or is in the process of moving to idle
// mode. Attempt to exit idle mode to allow this RPC.
- if err := m.exitIdleMode(); err != nil {
+ if err := m.ExitIdleMode(); err != nil {
// Undo the increment to calls count, and return an error causing the
// RPC to fail.
atomic.AddInt32(&m.activeCallsCount, -1)
@@ -238,28 +212,30 @@ func (m *manager) OnCallBegin() error {
return nil
}
-// exitIdleMode instructs the channel to exit idle mode.
-//
-// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
-func (m *manager) exitIdleMode() error {
+// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's
+// internal state.
+func (m *Manager) ExitIdleMode() error {
+ // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
m.idleMu.Lock()
defer m.idleMu.Unlock()
- if !m.actuallyIdle {
- // This can happen in two scenarios:
+ if m.isClosed() || !m.actuallyIdle {
+ // This can happen in three scenarios:
// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
// tryEnterIdleMode(). But before the latter could grab the lock, an RPC
// came in and OnCallBegin() noticed that the calls count is negative.
// - Channel is in idle mode, and multiple new RPCs come in at the same
// time, all of them notice a negative calls count in OnCallBegin and get
- // here. The first one to get the lock would got the channel to exit idle.
+ // here. The first one to get the lock would get the channel to exit idle.
+ // - Channel is not in idle mode, and the user calls Connect which calls
+ // m.ExitIdleMode.
//
- // Either way, nothing to do here.
+ // In any case, there is nothing to do here.
return nil
}
if err := m.enforcer.ExitIdleMode(); err != nil {
- return fmt.Errorf("channel failed to exit idle mode: %v", err)
+ return fmt.Errorf("failed to exit idle mode: %w", err)
}
// Undo the idle entry process. This also respects any new RPC attempts.
@@ -267,12 +243,12 @@ func (m *manager) exitIdleMode() error {
m.actuallyIdle = false
// Start a new timer to fire after the configured idle timeout.
- m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout)
+ m.resetIdleTimerLocked(m.timeout)
return nil
}
// OnCallEnd is invoked at the end of every RPC.
-func (m *manager) OnCallEnd() {
+func (m *Manager) OnCallEnd() {
if m.isClosed() {
return
}
@@ -287,15 +263,18 @@ func (m *manager) OnCallEnd() {
atomic.AddInt32(&m.activeCallsCount, -1)
}
-func (m *manager) isClosed() bool {
+func (m *Manager) isClosed() bool {
return atomic.LoadInt32(&m.closed) == 1
}
-func (m *manager) Close() {
+// Close stops the timer associated with the Manager, if it exists.
+func (m *Manager) Close() {
atomic.StoreInt32(&m.closed, 1)
m.idleMu.Lock()
- m.timer.Stop()
- m.timer = nil
+ if m.timer != nil {
+ m.timer.Stop()
+ m.timer = nil
+ }
m.idleMu.Unlock()
}
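
The idleness manager is now a concrete Manager that starts in idle mode, EnterIdleMode no longer returns an error, and the logger dependency is dropped. A sketch of the new shape from a caller's point of view; the enforcer below is a stand-in, not the real grpc.ClientConn wiring:

package somecomponent

import (
	"log"
	"time"

	"google.golang.org/grpc/internal/idle"
)

// stubEnforcer only illustrates the interface; in gRPC the enforcer is the
// ClientConn, which tears down or rebuilds its name resolver and balancer.
type stubEnforcer struct{}

func (stubEnforcer) ExitIdleMode() error { log.Println("exit idle"); return nil }
func (stubEnforcer) EnterIdleMode()      { log.Println("enter idle") }

func run() {
	m := idle.NewManager(stubEnforcer{}, 30*time.Minute)
	defer m.Close()

	// RPCs bracket their lifetime; the first OnCallBegin exits idle mode.
	if err := m.OnCallBegin(); err != nil {
		log.Printf("RPC rejected: %v", err)
		return
	}
	m.OnCallEnd()
}
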
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index c8a8c76d6..20b4dc3d3 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -57,7 +57,7 @@ var (
// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
// stored in the passed in attributes. This is set by
// credentials/xds/xds.go.
- GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo
+ GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer
// GetServerCredentials returns the transport credentials configured on a
// gRPC server. An xDS-enabled server needs to know what type of credentials
// is configured on the underlying gRPC server. This is set by server.go.
@@ -68,11 +68,11 @@ var (
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
CanonicalString any // func (codes.Code) string
- // DrainServerTransports initiates a graceful close of existing connections
- // on a gRPC server accepted on the provided listener address. An
- // xDS-enabled server invokes this method on a grpc.Server when a particular
- // listener moves to "not-serving" mode.
- DrainServerTransports any // func(*grpc.Server, string)
+ // IsRegisteredMethod returns whether the passed in method is registered as
+ // a method on the server.
+ IsRegisteredMethod any // func(*grpc.Server, string) bool
+ // ServerFromContext returns the server from the context.
+ ServerFromContext any // func(context.Context) *grpc.Server
// AddGlobalServerOptions adds an array of ServerOption that will be
// effective globally for newly created servers. The priority will be: 1.
// user-provided; 2. this method; 3. default values.
@@ -106,6 +106,14 @@ var (
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
ClearGlobalDialOptions func()
+
+ // AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be
+ // configured for newly created ClientConns.
+ AddGlobalPerTargetDialOptions any // func (opt any)
+ // ClearGlobalPerTargetDialOptions clears the slice of global late apply
+ // dial options.
+ ClearGlobalPerTargetDialOptions func()
+
// JoinDialOptions combines the dial options passed as arguments into a
// single dial option.
JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption
@@ -126,7 +134,8 @@ var (
// deleted or changed.
BinaryLogger any // func(binarylog.Logger) grpc.ServerOption
- // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn
+ // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a
+ // provided grpc.ClientConn.
SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber)
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
@@ -174,14 +183,57 @@ var (
// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
// metadata to RPCs.
- GRPCResolverSchemeExtraMetadata string = "xds"
+ GRPCResolverSchemeExtraMetadata = "xds"
+
+ // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
+ EnterIdleModeForTesting any // func(*grpc.ClientConn)
+
+ // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
+ ExitIdleModeForTesting any // func(*grpc.ClientConn) error
+
+ // ChannelzTurnOffForTesting disables the Channelz service for testing
+ // purposes.
+ ChannelzTurnOffForTesting func()
+
+ // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to
+ // invoke resource-not-found error for the given resource type and name.
+ TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error
+
+ // FromOutgoingContextRaw returns the un-merged, intermediary contents of
+ // metadata.rawMD.
+ FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool)
+
+ // UserSetDefaultScheme is set to true if the user has overridden the
+ // default resolver scheme.
+ UserSetDefaultScheme = false
+
+ // ConnectedAddress returns the connected address for a SubConnState. The
+ // address is only valid if the state is READY.
+ ConnectedAddress any // func (scs SubConnState) resolver.Address
+
+ // SetConnectedAddress sets the connected address for a SubConnState.
+ SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address)
+
+ // SnapshotMetricRegistryForTesting snapshots the global data of the metric
+ // registry. Returns a cleanup function that sets the metric registry to its
+ // original state. Only called in testing functions.
+ SnapshotMetricRegistryForTesting func() func()
+
+ // SetDefaultBufferPoolForTesting updates the default buffer pool, for
+ // testing purposes.
+ SetDefaultBufferPoolForTesting any // func(mem.BufferPool)
+
+ // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for
+ // testing purposes.
+ SetBufferPoolingThresholdForTesting any // func(int)
)
-// HealthChecker defines the signature of the client-side LB channel health checking function.
+// HealthChecker defines the signature of the client-side LB channel health
+// checking function.
//
// The implementation is expected to create a health checking RPC stream by
// calling newStream(), watch for the health status of serviceName, and report
-// it's health back by calling setConnectivityState().
+// its health back by calling setConnectivityState().
//
// The health checking protocol is defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
index 703319137..dbee7a60d 100644
--- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go
+++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
@@ -24,10 +24,8 @@ import (
"encoding/json"
"fmt"
- "github.com/golang/protobuf/jsonpb"
- protov1 "github.com/golang/protobuf/proto"
"google.golang.org/protobuf/encoding/protojson"
- protov2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/protoadapt"
)
const jsonIndent = " "
@@ -36,21 +34,14 @@ const jsonIndent = " "
//
// If marshal fails, it falls back to fmt.Sprintf("%+v").
func ToJSON(e any) string {
- switch ee := e.(type) {
- case protov1.Message:
- mm := jsonpb.Marshaler{Indent: jsonIndent}
- ret, err := mm.MarshalToString(ee)
- if err != nil {
- // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
- // messages are not imported, and this will fail because the message
- // is not found.
- return fmt.Sprintf("%+v", ee)
- }
- return ret
- case protov2.Message:
+ if ee, ok := e.(protoadapt.MessageV1); ok {
+ e = protoadapt.MessageV2Of(ee)
+ }
+
+ if ee, ok := e.(protoadapt.MessageV2); ok {
mm := protojson.MarshalOptions{
- Multiline: true,
Indent: jsonIndent,
+ Multiline: true,
}
ret, err := mm.Marshal(ee)
if err != nil {
@@ -60,13 +51,13 @@ func ToJSON(e any) string {
return fmt.Sprintf("%+v", ee)
}
return string(ret)
- default:
- ret, err := json.MarshalIndent(ee, "", jsonIndent)
- if err != nil {
- return fmt.Sprintf("%+v", ee)
- }
- return string(ret)
}
+
+ ret, err := json.MarshalIndent(e, "", jsonIndent)
+ if err != nil {
+ return fmt.Sprintf("%+v", e)
+ }
+ return string(ret)
}
// FormatJSON formats the input json bytes with indentation.
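The ToJSON rework above collapses the legacy jsonpb/proto-v1 path into a single protojson path by first upgrading any MessageV1 value through protoadapt. A minimal standalone sketch of the same normalization pattern; durationpb is used only as an arbitrary example payload and is not part of the diff:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/protoadapt"
	"google.golang.org/protobuf/types/known/durationpb"
)

// toJSON mirrors the upgraded logic: legacy (V1) proto messages are adapted to
// the protobuf-go v2 API before protojson marshaling; non-proto values fall
// back to encoding/json, and marshal failures fall back to %+v formatting.
func toJSON(e any) string {
	if v1, ok := e.(protoadapt.MessageV1); ok {
		e = protoadapt.MessageV2Of(v1)
	}
	if m, ok := e.(protoadapt.MessageV2); ok {
		b, err := protojson.MarshalOptions{Multiline: true, Indent: "  "}.Marshal(m)
		if err != nil {
			return fmt.Sprintf("%+v", m)
		}
		return string(b)
	}
	b, err := json.MarshalIndent(e, "", "  ")
	if err != nil {
		return fmt.Sprintf("%+v", e)
	}
	return string(b)
}

func main() {
	fmt.Println(toJSON(durationpb.New(3 * time.Second)))
}
```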
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index 99e1e5b36..8691698ef 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -23,8 +23,8 @@ package dns
import (
"context"
"encoding/json"
- "errors"
"fmt"
+ "math/rand"
"net"
"os"
"strconv"
@@ -36,26 +36,37 @@ import (
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/envconfig"
- "google.golang.org/grpc/internal/grpcrand"
+ "google.golang.org/grpc/internal/resolver/dns/internal"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
)
-// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
-// addresses from SRV records. Must not be changed after init time.
-var EnableSRVLookups = false
-
-var logger = grpclog.Component("dns")
-
-// Globals to stub out in tests. TODO: Perhaps these two can be combined into a
-// single variable for testing the resolver?
var (
- newTimer = time.NewTimer
- newTimerDNSResRate = time.NewTimer
+ // EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
+ // addresses from SRV records. Must not be changed after init time.
+ EnableSRVLookups = false
+
+ // MinResolutionInterval is the minimum interval at which re-resolutions are
+ // allowed. This helps to prevent excessive re-resolution.
+ MinResolutionInterval = 30 * time.Second
+
+ // ResolvingTimeout specifies the maximum duration for a DNS resolution request.
+ // If the timeout expires before a response is received, the request will be canceled.
+ //
+ // It is recommended to set this value at application startup. Avoid modifying this variable
+ // after initialization as it's not thread-safe for concurrent modification.
+ ResolvingTimeout = 30 * time.Second
+
+ logger = grpclog.Component("dns")
)
func init() {
resolver.Register(NewBuilder())
+ internal.TimeAfterFunc = time.After
+ internal.TimeNowFunc = time.Now
+ internal.TimeUntilFunc = time.Until
+ internal.NewNetResolver = newNetResolver
+ internal.AddressDialer = addressDialer
}
const (
@@ -70,23 +81,6 @@ const (
txtAttribute = "grpc_config="
)
-var (
- errMissingAddr = errors.New("dns resolver: missing address")
-
- // Addresses ending with a colon that is supposed to be the separator
- // between host and port is not allowed. E.g. "::" is a valid address as
- // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with
- // a colon as the host and port separator
- errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
-)
-
-var (
- defaultResolver netResolver = net.DefaultResolver
- // To prevent excessive re-resolution, we enforce a rate limit on DNS
- // resolution requests.
- minDNSResRate = 30 * time.Second
-)
-
var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) {
return func(ctx context.Context, network, _ string) (net.Conn, error) {
var dialer net.Dialer
@@ -94,7 +88,11 @@ var addressDialer = func(address string) func(context.Context, string, string) (
}
}
-var newNetResolver = func(authority string) (netResolver, error) {
+var newNetResolver = func(authority string) (internal.NetResolver, error) {
+ if authority == "" {
+ return net.DefaultResolver, nil
+ }
+
host, port, err := parseTarget(authority, defaultDNSSvrPort)
if err != nil {
return nil, err
@@ -104,7 +102,7 @@ var newNetResolver = func(authority string) (netResolver, error) {
return &net.Resolver{
PreferGo: true,
- Dial: addressDialer(authorityWithPort),
+ Dial: internal.AddressDialer(authorityWithPort),
}, nil
}
@@ -142,13 +140,9 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
disableServiceConfig: opts.DisableServiceConfig,
}
- if target.URL.Host == "" {
- d.resolver = defaultResolver
- } else {
- d.resolver, err = newNetResolver(target.URL.Host)
- if err != nil {
- return nil, err
- }
+ d.resolver, err = internal.NewNetResolver(target.URL.Host)
+ if err != nil {
+ return nil, err
}
d.wg.Add(1)
@@ -161,12 +155,6 @@ func (b *dnsBuilder) Scheme() string {
return "dns"
}
-type netResolver interface {
- LookupHost(ctx context.Context, host string) (addrs []string, err error)
- LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
- LookupTXT(ctx context.Context, name string) (txts []string, err error)
-}
-
// deadResolver is a resolver that does nothing.
type deadResolver struct{}
@@ -178,7 +166,7 @@ func (deadResolver) Close() {}
type dnsResolver struct {
host string
port string
- resolver netResolver
+ resolver internal.NetResolver
ctx context.Context
cancel context.CancelFunc
cc resolver.ClientConn
@@ -189,7 +177,7 @@ type dnsResolver struct {
// finished. Otherwise, data race will be possible. [Race Example] in
// dns_resolver_test we replace the real lookup functions with mocked ones to
// facilitate testing. If Close() doesn't wait for watcher() goroutine
- // finishes, race detector sometimes will warns lookup (READ the lookup
+ // finishes, race detector sometimes will warn lookup (READ the lookup
// function pointers) inside watcher() goroutine has data race with
// replaceNetFunc (WRITE the lookup function pointers).
wg sync.WaitGroup
@@ -223,45 +211,43 @@ func (d *dnsResolver) watcher() {
err = d.cc.UpdateState(*state)
}
- var timer *time.Timer
+ var nextResolutionTime time.Time
if err == nil {
// Success resolving, wait for the next ResolveNow. However, also wait 30
// seconds at the very least to prevent constantly re-resolving.
backoffIndex = 1
- timer = newTimerDNSResRate(minDNSResRate)
+ nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval)
select {
case <-d.ctx.Done():
- timer.Stop()
return
case <-d.rn:
}
} else {
// Poll on an error found in DNS Resolver or an error received from
// ClientConn.
- timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
+ nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex))
backoffIndex++
}
select {
case <-d.ctx.Done():
- timer.Stop()
return
- case <-timer.C:
+ case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)):
}
}
}
-func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
+func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) {
if !EnableSRVLookups {
return nil, nil
}
var newAddrs []resolver.Address
- _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
+ _, srvs, err := d.resolver.LookupSRV(ctx, "grpclb", "tcp", d.host)
if err != nil {
err = handleDNSError(err, "SRV") // may become nil
return nil, err
}
for _, s := range srvs {
- lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
+ lbAddrs, err := d.resolver.LookupHost(ctx, s.Target)
if err != nil {
err = handleDNSError(err, "A") // may become nil
if err == nil {
@@ -298,8 +284,8 @@ func handleDNSError(err error, lookupType string) error {
return err
}
-func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
- ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
+func (d *dnsResolver) lookupTXT(ctx context.Context) *serviceconfig.ParseResult {
+ ss, err := d.resolver.LookupTXT(ctx, txtPrefix+d.host)
if err != nil {
if envconfig.TXTErrIgnore {
return nil
@@ -326,8 +312,8 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
return d.cc.ParseServiceConfig(sc)
}
-func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
- addrs, err := d.resolver.LookupHost(d.ctx, d.host)
+func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error) {
+ addrs, err := d.resolver.LookupHost(ctx, d.host)
if err != nil {
err = handleDNSError(err, "A")
return nil, err
@@ -345,8 +331,10 @@ func (d *dnsResolver) lookupHost() ([]resolver.Address, error) {
}
func (d *dnsResolver) lookup() (*resolver.State, error) {
- srv, srvErr := d.lookupSRV()
- addrs, hostErr := d.lookupHost()
+ ctx, cancel := context.WithTimeout(d.ctx, ResolvingTimeout)
+ defer cancel()
+ srv, srvErr := d.lookupSRV(ctx)
+ addrs, hostErr := d.lookupHost(ctx)
if hostErr != nil && (srvErr != nil || len(srv) == 0) {
return nil, hostErr
}
@@ -356,7 +344,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
}
if !d.disableServiceConfig {
- state.ServiceConfig = d.lookupTXT()
+ state.ServiceConfig = d.lookupTXT(ctx)
}
return &state, nil
}
@@ -387,7 +375,7 @@ func formatIP(addr string) (addrIP string, ok bool) {
// target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
func parseTarget(target, defaultPort string) (host, port string, err error) {
if target == "" {
- return "", "", errMissingAddr
+ return "", "", internal.ErrMissingAddr
}
if ip := net.ParseIP(target); ip != nil {
// target is an IPv4 or IPv6(without brackets) address
@@ -397,7 +385,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) {
if port == "" {
// If the port field is empty (target ends with colon), e.g. "[::1]:",
// this is an error.
- return "", "", errEndsWithColon
+ return "", "", internal.ErrEndsWithColon
}
// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
if host == "" {
@@ -437,7 +425,7 @@ func chosenByPercentage(a *int) bool {
if a == nil {
return true
}
- return grpcrand.Intn(100)+1 <= *a
+ return rand.Intn(100)+1 <= *a
}
func canaryingSC(js string) string {
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
new file mode 100644
index 000000000..c0eae4f5f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
@@ -0,0 +1,77 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains functionality internal to the dns resolver package.
+package internal
+
+import (
+ "context"
+ "errors"
+ "net"
+ "time"
+)
+
+// NetResolver groups the methods on net.Resolver that are used by the DNS
+// resolver implementation. This allows the default net.Resolver instance to be
+// overridden from tests.
+type NetResolver interface {
+ LookupHost(ctx context.Context, host string) (addrs []string, err error)
+ LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
+ LookupTXT(ctx context.Context, name string) (txts []string, err error)
+}
+
+var (
+ // ErrMissingAddr is the error returned when building a DNS resolver when
+ // the provided target name is empty.
+ ErrMissingAddr = errors.New("dns resolver: missing address")
+
+ // ErrEndsWithColon is the error returned when building a DNS resolver when
+ // the provided target name ends with a colon that is supposed to be the
+ // separator between host and port. E.g. "::" is a valid address as it is
+ // an IPv6 address (host only) and "[::]:" is invalid as it ends with a
+ // colon as the host and port separator
+ ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
+)
+
+// The following vars are overridden from tests.
+var (
+ // TimeAfterFunc is used by the DNS resolver to wait for the given duration
+ // to elapse. In non-test code, this is implemented by time.After. In test
+ // code, this can be used to control the amount of time the resolver is
+ // blocked waiting for the duration to elapse.
+ TimeAfterFunc func(time.Duration) <-chan time.Time
+
+ // TimeNowFunc is used by the DNS resolver to get the current time.
+ // In non-test code, this is implemented by time.Now. In test code,
+ // this can be used to control the current time for the resolver.
+ TimeNowFunc func() time.Time
+
+ // TimeUntilFunc is used by the DNS resolver to calculate the remaining
+ // wait time for re-resolution. In non-test code, this is implemented by
+ // time.Until. In test code, this can be used to control the remaining
+ // time for resolver to wait for re-resolution.
+ TimeUntilFunc func(time.Time) time.Duration
+
+ // NewNetResolver returns the net.Resolver instance for the given target.
+ NewNetResolver func(string) (NetResolver, error)
+
+ // AddressDialer is the dialer used to dial the DNS server. It accepts the
+ // Host portion of the URL corresponding to the user's dial target and
+ // returns a dial function.
+ AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error)
+)
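The new internal package exists so the resolver's timing and lookup plumbing can be stubbed from tests. A minimal sketch of how a test living next to the resolver (inside the grpc module, since the package is internal) might take control of the re-resolution wait; overrideTimeAfter is a hypothetical helper, not part of the gRPC API:

```go
package dns_test

import (
	"testing"
	"time"

	"google.golang.org/grpc/internal/resolver/dns/internal"
)

// overrideTimeAfter replaces the resolver's wait hook with a channel the test
// controls, and restores the original hook when the test finishes.
func overrideTimeAfter(t *testing.T) chan time.Time {
	t.Helper()
	ch := make(chan time.Time, 1)
	orig := internal.TimeAfterFunc
	internal.TimeAfterFunc = func(time.Duration) <-chan time.Time { return ch }
	t.Cleanup(func() { internal.TimeAfterFunc = orig })
	return ch
}

func TestReresolutionTiming(t *testing.T) {
	tick := overrideTimeAfter(t)
	// ... build a ClientConn / resolver through the registered "dns" scheme ...
	tick <- time.Now() // unblocks the watcher's wait immediately
}
```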
diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
index afac56572..b901c7bac 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
@@ -55,7 +55,7 @@ func (r *passthroughResolver) start() {
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
}
-func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
+func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (*passthroughResolver) Close() {}
diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
index 160911687..27cd81af9 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
@@ -61,6 +61,10 @@ func (b *builder) Scheme() string {
return b.scheme
}
+func (b *builder) OverrideAuthority(resolver.Target) string {
+ return "localhost"
+}
+
type nopResolver struct {
}
diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go
new file mode 100644
index 000000000..fd33af51a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/labels.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats provides internal stats related functionality.
+package stats
+
+import "context"
+
+// Labels are the labels for metrics.
+type Labels struct {
+ // TelemetryLabels are the telemetry labels to record.
+ TelemetryLabels map[string]string
+}
+
+type labelsKey struct{}
+
+// GetLabels returns the Labels stored in the context, or nil if there is none.
+func GetLabels(ctx context.Context) *Labels {
+ labels, _ := ctx.Value(labelsKey{}).(*Labels)
+ return labels
+}
+
+// SetLabels sets the Labels in the context.
+func SetLabels(ctx context.Context, labels *Labels) context.Context {
+ // could also append
+ return context.WithValue(ctx, labelsKey{}, labels)
+}
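A minimal sketch of the context round trip the new Labels type enables, written as if it lived inside the grpc module (internal/stats cannot be imported from outside it):

```go
package stats_test

import (
	"context"
	"testing"

	istats "google.golang.org/grpc/internal/stats"
)

func TestLabelsRoundTrip(t *testing.T) {
	// A stats handler would typically store telemetry labels on the RPC's
	// context; a later component reads them back with GetLabels.
	ctx := istats.SetLabels(context.Background(), &istats.Labels{
		TelemetryLabels: map[string]string{"grpc.lb.locality": "us-central1-a"},
	})
	labels := istats.GetLabels(ctx)
	if labels == nil || labels.TelemetryLabels["grpc.lb.locality"] != "us-central1-a" {
		t.Fatal("labels were not propagated through the context")
	}
}
```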
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
new file mode 100644
index 000000000..79044657b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import (
+ "fmt"
+
+ estats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/stats"
+)
+
+// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
+//
+// It eats any record calls where the label values provided do not match the
+// number of label keys.
+type MetricsRecorderList struct {
+ // metricsRecorders are the metrics recorders this list will forward to.
+ metricsRecorders []estats.MetricsRecorder
+}
+
+// NewMetricsRecorderList creates a new metric recorder list with all the stats
+// handlers provided which implement the MetricsRecorder interface.
+// If no stats handlers provided implement the MetricsRecorder interface,
+// the MetricsRecorder list returned is a no-op.
+func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList {
+ var mrs []estats.MetricsRecorder
+ for _, sh := range shs {
+ if mr, ok := sh.(estats.MetricsRecorder); ok {
+ mrs = append(mrs, mr)
+ }
+ }
+ return &MetricsRecorderList{
+ metricsRecorders: mrs,
+ }
+}
+
+func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
+ if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want {
+ panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want))
+ }
+}
+
+// RecordInt64Count records the measurement alongside labels on the int
+// count associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Count(handle, incr, labels...)
+ }
+}
+
+// RecordFloat64Count records the measurement alongside labels on the float
+// count associated with the provided handle.
+func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordFloat64Count(handle, incr, labels...)
+ }
+}
+
+// RecordInt64Histo records the measurement alongside labels on the int
+// histo associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Histo(handle, incr, labels...)
+ }
+}
+
+// RecordFloat64Histo records the measurement alongside labels on the float
+// histo associated with the provided handle.
+func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordFloat64Histo(handle, incr, labels...)
+ }
+}
+
+// RecordInt64Gauge records the measurement alongside labels on the int
+// gauge associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Gauge(handle, incr, labels...)
+ }
+}
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
index 4cf85cad9..1186f1e9a 100644
--- a/vendor/google.golang.org/grpc/internal/status/status.go
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -31,10 +31,11 @@ import (
"errors"
"fmt"
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes"
spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/protoadapt"
+ "google.golang.org/protobuf/types/known/anypb"
)
// Status represents an RPC status code, message, and details. It is immutable
@@ -43,6 +44,34 @@ type Status struct {
s *spb.Status
}
+// NewWithProto returns a new status including details from statusProto. This
+// is meant to be used by the gRPC library only.
+func NewWithProto(code codes.Code, message string, statusProto []string) *Status {
+ if len(statusProto) != 1 {
+ // No grpc-status-details bin header, or multiple; just ignore.
+ return &Status{s: &spb.Status{Code: int32(code), Message: message}}
+ }
+ st := &spb.Status{}
+ if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil {
+ // Probably not a google.rpc.Status proto; do not provide details.
+ return &Status{s: &spb.Status{Code: int32(code), Message: message}}
+ }
+ if st.Code == int32(code) {
+ // The codes match between the grpc-status header and the
+ // grpc-status-details-bin header; use the full details proto.
+ return &Status{s: st}
+ }
+ return &Status{
+ s: &spb.Status{
+ Code: int32(codes.Internal),
+ Message: fmt.Sprintf(
+ "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v",
+ code, message, st,
+ ),
+ },
+ }
+}
+
// New returns a Status representing c and msg.
func New(c codes.Code, msg string) *Status {
return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
@@ -102,36 +131,69 @@ func (s *Status) Err() error {
// WithDetails returns a new status with the provided details messages appended to the status.
// If any errors are encountered, it returns nil and the first error encountered.
-func (s *Status) WithDetails(details ...proto.Message) (*Status, error) {
+func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
if s.Code() == codes.OK {
return nil, errors.New("no error details for status with code OK")
}
// s.Code() != OK implies that s.Proto() != nil.
p := s.Proto()
for _, detail := range details {
- any, err := ptypes.MarshalAny(detail)
+ m, err := anypb.New(protoadapt.MessageV2Of(detail))
if err != nil {
return nil, err
}
- p.Details = append(p.Details, any)
+ p.Details = append(p.Details, m)
}
return &Status{s: p}, nil
}
// Details returns a slice of details messages attached to the status.
// If a detail cannot be decoded, the error is returned in place of the detail.
+// If the detail can be decoded, the proto message returned is of the same
+// type that was given to WithDetails().
func (s *Status) Details() []any {
if s == nil || s.s == nil {
return nil
}
details := make([]any, 0, len(s.s.Details))
for _, any := range s.s.Details {
- detail := &ptypes.DynamicAny{}
- if err := ptypes.UnmarshalAny(any, detail); err != nil {
+ detail, err := any.UnmarshalNew()
+ if err != nil {
details = append(details, err)
continue
}
- details = append(details, detail.Message)
+ // The call to MessageV1Of is required to unwrap the proto message if
+ // it implemented only the MessageV1 API. The proto message would have
+ // been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are
+ // added to a global registry used by any.UnmarshalNew().
+ // MessageV1Of has the following behaviour:
+ // 1. If the given message is a wrapped MessageV1, it returns the
+ // unwrapped value.
+ // 2. If the given message already implements MessageV1, it returns it
+ // as is.
+ // 3. Else, it wraps the MessageV2 in a MessageV1 wrapper.
+ //
+ // Since the Status.WithDetails() API only accepts MessageV1, calling
+ // MessageV1Of ensures we return the same type that was given to
+ // WithDetails:
+ // * If the given type implemented only MessageV1, the unwrapping from
+ // point 1 above will restore the type.
+ // * If the given type implemented both MessageV1 and MessageV2, point 2
+ // above will ensure no wrapping is performed.
+ // * If the given type implemented only MessageV2 and was wrapped using
+ // MessageV1Of before passing to WithDetails(), it would be unwrapped
+ // in WithDetails by calling MessageV2Of(). Point 3 above will ensure
+ // that the type is wrapped in a MessageV1 wrapper again before
+ // returning. Note that protoc-gen-go doesn't generate code which
+ // implements ONLY MessageV2 at the time of writing.
+ //
+ // NOTE: Status details can also be added using the FromProto method.
+ // This could theoretically allow passing a Detail message that only
+ // implements the V2 API. In such a case the message will be wrapped in
+ // a MessageV1 wrapper when fetched using Details().
+ // Since protoc-gen-go generates only code that implements both V1 and
+ // V2 APIs for backward compatibility, this is not a concern.
+ details = append(details, protoadapt.MessageV1Of(detail))
}
return details
}
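The long comment block above is easier to follow with the round trip in front of you. A minimal sketch through the public status package, using errdetails.ResourceInfo only as an arbitrary detail payload:

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	st := status.New(codes.NotFound, "resource missing")
	// WithDetails accepts MessageV1 values; protoc-gen-go messages satisfy
	// both the V1 and V2 APIs, so generated types can be passed directly.
	st, err := st.WithDetails(&errdetails.ResourceInfo{ResourceName: "projects/p1"})
	if err != nil {
		panic(err)
	}
	// Details returns the same concrete type that was attached.
	for _, d := range st.Details() {
		if ri, ok := d.(*errdetails.ResourceInfo); ok {
			fmt.Println(ri.GetResourceName()) // projects/p1
		}
	}
}
```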
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
index 999f52cd7..54c24c2ff 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -58,20 +58,20 @@ func GetRusage() *Rusage {
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
// between two Rusage structs. It is a no-op function for non-linux environments.
-func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) {
log()
return 0, 0
}
// SetTCPUserTimeout is a no-op function under non-linux environments.
-func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+func SetTCPUserTimeout(net.Conn, time.Duration) error {
log()
return nil
}
// GetTCPUserTimeout is a no-op function under non-linux environments.
// A negative return value indicates the operation is not supported
-func GetTCPUserTimeout(conn net.Conn) (int, error) {
+func GetTCPUserTimeout(net.Conn) (int, error) {
log()
return -1, nil
}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
similarity index 60%
rename from vendor/google.golang.org/grpc/internal/channelz/util_linux.go
rename to vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
index 98288c3f8..4f347edd4 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
@@ -1,6 +1,7 @@
+//go:build !unix && !windows
+
/*
- *
- * Copyright 2018 gRPC authors.
+ * Copyright 2023 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,22 +17,13 @@
*
*/
-package channelz
+package internal
import (
- "syscall"
+ "net"
)
-// GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(socket any) *SocketOptionData {
- c, ok := socket.(syscall.Conn)
- if !ok {
- return nil
- }
- data := &SocketOptionData{}
- if rawConn, err := c.SyscallConn(); err == nil {
- rawConn.Control(data.Getsockopt)
- return data
- }
- return nil
+// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+ return &net.Dialer{}
}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
new file mode 100644
index 000000000..7e7aaa546
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
@@ -0,0 +1,54 @@
+//go:build unix
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+ return &net.Dialer{
+ // Setting a negative value here prevents the Go stdlib from overriding
+ // the values of TCP keepalive time and interval. It also prevents the
+ // Go stdlib from enabling TCP keepalives by default.
+ KeepAlive: time.Duration(-1),
+ // This method is called after the underlying network socket is created,
+ // but before dialing the socket (or calling its connect() method). The
+ // combination of unconditionally enabling TCP keepalives here, and
+ // disabling the overriding of TCP keepalive parameters by setting the
+ // KeepAlive field to a negative value above, results in OS defaults for
+ // the TCP keepalive interval and time parameters.
+ Control: func(_, _ string, c syscall.RawConn) error {
+ return c.Control(func(fd uintptr) {
+ unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+ })
+ },
+ }
+}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
new file mode 100644
index 000000000..d5c1085ee
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
@@ -0,0 +1,54 @@
+//go:build windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+ return &net.Dialer{
+ // Setting a negative value here prevents the Go stdlib from overriding
+ // the values of TCP keepalive time and interval. It also prevents the
+ // Go stdlib from enabling TCP keepalives by default.
+ KeepAlive: time.Duration(-1),
+ // This method is called after the underlying network socket is created,
+ // but before dialing the socket (or calling its connect() method). The
+ // combination of unconditionally enabling TCP keepalives here, and
+ // disabling the overriding of TCP keepalive parameters by setting the
+ // KeepAlive field to a negative value above, results in OS defaults for
+ // the TCP keepalive interval and time parameters.
+ Control: func(_, _ string, c syscall.RawConn) error {
+ return c.Control(func(fd uintptr) {
+ windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
+ })
+ },
+ }
+}
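Both platform variants keep keepalives enabled with OS defaults by pairing a negative KeepAlive with a Control hook. Inside gRPC the dialer is wired in internally; the sketch below is hypothetical wiring of an equivalent dialer through the public WithContextDialer option (the target address and credentials are placeholders, and the SO_KEEPALIVE Control hook is omitted for brevity):

```go
package main

import (
	"context"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// A negative KeepAlive stops the stdlib from configuring its own keepalive
	// knobs, mirroring the internal dialer above.
	d := &net.Dialer{KeepAlive: time.Duration(-1)}
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			return d.DialContext(ctx, "tcp", addr)
		}),
	)
	if err != nil {
		return
	}
	defer conn.Close()
}
```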
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index b330ccedc..ef72fbb3a 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -32,6 +32,7 @@ import (
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/status"
)
@@ -148,9 +149,9 @@ type dataFrame struct {
streamID uint32
endStream bool
h []byte
- d []byte
+ reader mem.Reader
// onEachWrite is called every time
- // a part of d is written out.
+ // a part of data is written out.
onEachWrite func()
}
@@ -193,7 +194,7 @@ type goAway struct {
code http2.ErrCode
debugData []byte
headsUp bool
- closeConn error // if set, loopyWriter will exit, resulting in conn closure
+ closeConn error // if set, loopyWriter will exit with this error
}
func (*goAway) isTransportResponseFrame() bool { return false }
@@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream {
}
// controlBuffer is a way to pass information to loopy.
-// Information is passed as specific struct types called control frames.
-// A control frame not only represents data, messages or headers to be sent out
-// but can also be used to instruct loopy to update its internal state.
-// It shouldn't be confused with an HTTP2 frame, although some of the control frames
-// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
+//
+// Information is passed as specific struct types called control frames. A
+// control frame not only represents data, messages or headers to be sent out
+// but can also be used to instruct loopy to update its internal state. It
+// shouldn't be confused with an HTTP2 frame, although some of the control
+// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames.
type controlBuffer struct {
- ch chan struct{}
- done <-chan struct{}
+ wakeupCh chan struct{} // Unblocks readers waiting for something to read.
+ done <-chan struct{} // Closed when the transport is done.
+
+ // Mutex guards all the fields below, except trfChan which can be read
+ // atomically without holding mu.
mu sync.Mutex
- consumerWaiting bool
- list *itemList
- err error
+ consumerWaiting bool // True when readers are blocked waiting for new data.
+ closed bool // True when the controlbuf is finished.
+ list *itemList // List of queued control frames.
// transportResponseFrames counts the number of queued items that represent
// the response of an action initiated by the peer. trfChan is created
@@ -308,47 +313,59 @@ type controlBuffer struct {
// closed and nilled when transportResponseFrames drops below the
// threshold. Both fields are protected by mu.
transportResponseFrames int
- trfChan atomic.Value // chan struct{}
+ trfChan atomic.Pointer[chan struct{}]
}
func newControlBuffer(done <-chan struct{}) *controlBuffer {
return &controlBuffer{
- ch: make(chan struct{}, 1),
- list: &itemList{},
- done: done,
+ wakeupCh: make(chan struct{}, 1),
+ list: &itemList{},
+ done: done,
}
}
-// throttle blocks if there are too many incomingSettings/cleanupStreams in the
-// controlbuf.
+// throttle blocks if there are too many frames in the control buf that
+// represent the response of an action initiated by the peer, like
+// incomingSettings, cleanupStreams, etc.
func (c *controlBuffer) throttle() {
- ch, _ := c.trfChan.Load().(chan struct{})
- if ch != nil {
+ if ch := c.trfChan.Load(); ch != nil {
select {
- case <-ch:
+ case <-(*ch):
case <-c.done:
}
}
}
+// put adds an item to the controlbuf.
func (c *controlBuffer) put(it cbItem) error {
_, err := c.executeAndPut(nil, it)
return err
}
-func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) {
- var wakeUp bool
+// executeAndPut runs f, and if the return value is true, adds the given item to
+// the controlbuf. The item could be nil, in which case, this method simply
+// executes f and does not add the item to the controlbuf.
+//
+// The first return value indicates whether the item was successfully added to
+// the control buffer. A non-nil error, specifically ErrConnClosing, is returned
+// if the control buffer is already closed.
+func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return false, c.err
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return false, ErrConnClosing
}
if f != nil {
- if !f(it) { // f wasn't successful
- c.mu.Unlock()
+ if !f() { // f wasn't successful
return false, nil
}
}
+ if it == nil {
+ return true, nil
+ }
+
+ var wakeUp bool
if c.consumerWaiting {
wakeUp = true
c.consumerWaiting = false
@@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, err
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
// We are adding the frame that puts us over the threshold; create
// a throttling channel.
- c.trfChan.Store(make(chan struct{}))
+ ch := make(chan struct{})
+ c.trfChan.Store(&ch)
}
}
- c.mu.Unlock()
if wakeUp {
select {
- case c.ch <- struct{}{}:
+ case c.wakeupCh <- struct{}{}:
default:
}
}
return true, nil
}
-// Note argument f should never be nil.
-func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) {
- c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return false, c.err
- }
- if !f(it) { // f wasn't successful
- c.mu.Unlock()
- return false, nil
- }
- c.mu.Unlock()
- return true, nil
-}
-
+// get returns the next control frame from the control buffer. If block is true
+// **and** there are no control frames in the control buffer, the call blocks
+// until one of the conditions is met: there is a frame to return or the
+// transport is closed.
func (c *controlBuffer) get(block bool) (any, error) {
for {
c.mu.Lock()
- if c.err != nil {
+ frame, err := c.getOnceLocked()
+ if frame != nil || err != nil || !block {
+ // If we read a frame or an error, we can return to the caller. The
+ // call to getOnceLocked() returns a nil frame and a nil error if
+ // there is nothing to read, and in that case, if the caller asked
+ // us not to block, we can return now as well.
c.mu.Unlock()
- return nil, c.err
- }
- if !c.list.isEmpty() {
- h := c.list.dequeue().(cbItem)
- if h.isTransportResponseFrame() {
- if c.transportResponseFrames == maxQueuedTransportResponseFrames {
- // We are removing the frame that put us over the
- // threshold; close and clear the throttling channel.
- ch := c.trfChan.Load().(chan struct{})
- close(ch)
- c.trfChan.Store((chan struct{})(nil))
- }
- c.transportResponseFrames--
- }
- c.mu.Unlock()
- return h, nil
- }
- if !block {
- c.mu.Unlock()
- return nil, nil
+ return frame, err
}
c.consumerWaiting = true
c.mu.Unlock()
+
+ // Release the lock above and wait to be woken up.
select {
- case <-c.ch:
+ case <-c.wakeupCh:
case <-c.done:
return nil, errors.New("transport closed by client")
}
}
}
+// Callers must not use this method, but should instead use get().
+//
+// Caller must hold c.mu.
+func (c *controlBuffer) getOnceLocked() (any, error) {
+ if c.closed {
+ return false, ErrConnClosing
+ }
+ if c.list.isEmpty() {
+ return nil, nil
+ }
+ h := c.list.dequeue().(cbItem)
+ if h.isTransportResponseFrame() {
+ if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+ // We are removing the frame that put us over the
+ // threshold; close and clear the throttling channel.
+ ch := c.trfChan.Swap(nil)
+ close(*ch)
+ }
+ c.transportResponseFrames--
+ }
+ return h, nil
+}
+
+// finish closes the control buffer, cleaning up any streams that have queued
+// header frames. Once this method returns, no more frames can be added to the
+// control buffer, and attempts to do so will return ErrConnClosing.
func (c *controlBuffer) finish() {
c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
+ defer c.mu.Unlock()
+
+ if c.closed {
return
}
- c.err = ErrConnClosing
+ c.closed = true
// There may be headers for streams in the control buffer.
// These streams need to be cleaned out since the transport
// is still not aware of these yet.
for head := c.list.dequeueAll(); head != nil; head = head.next {
- hdr, ok := head.it.(*headerFrame)
- if !ok {
- continue
- }
- if hdr.onOrphaned != nil { // It will be nil on the server-side.
- hdr.onOrphaned(ErrConnClosing)
+ switch v := head.it.(type) {
+ case *headerFrame:
+ if v.onOrphaned != nil { // It will be nil on the server-side.
+ v.onOrphaned(ErrConnClosing)
+ }
+ case *dataFrame:
+ _ = v.reader.Close()
}
}
+
// In case throttle() is currently in flight, it needs to be unblocked.
// Otherwise, the transport may not close, since the transport is closed by
// the reader encountering the connection error.
- ch, _ := c.trfChan.Load().(chan struct{})
+ ch := c.trfChan.Swap(nil)
if ch != nil {
- close(ch)
+ close(*ch)
}
- c.trfChan.Store((chan struct{})(nil))
- c.mu.Unlock()
}
type side int
@@ -466,7 +487,7 @@ const (
// stream maintains a queue of data frames; as loopy receives data frames
// it gets added to the queue of the relevant stream.
// Loopy goes over this list of active streams by processing one node every iteration,
-// thereby closely resemebling to a round-robin scheduling over all streams. While
+// thereby closely resembling a round-robin scheduling over all streams. While
// processing a stream, loopy writes out data bytes from this stream capped by the min
// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
type loopyWriter struct {
@@ -490,26 +511,29 @@ type loopyWriter struct {
draining bool
conn net.Conn
logger *grpclog.PrefixLogger
+ bufferPool mem.BufferPool
// Side-specific handlers
ssGoAwayHandler func(*goAway) (bool, error)
}
-func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter {
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
var buf bytes.Buffer
l := &loopyWriter{
- side: s,
- cbuf: cbuf,
- sendQuota: defaultWindowSize,
- oiws: defaultWindowSize,
- estdStreams: make(map[uint32]*outStream),
- activeStreams: newOutStreamList(),
- framer: fr,
- hBuf: &buf,
- hEnc: hpack.NewEncoder(&buf),
- bdpEst: bdpEst,
- conn: conn,
- logger: logger,
+ side: s,
+ cbuf: cbuf,
+ sendQuota: defaultWindowSize,
+ oiws: defaultWindowSize,
+ estdStreams: make(map[uint32]*outStream),
+ activeStreams: newOutStreamList(),
+ framer: fr,
+ hBuf: &buf,
+ hEnc: hpack.NewEncoder(&buf),
+ bdpEst: bdpEst,
+ conn: conn,
+ logger: logger,
+ ssGoAwayHandler: goAwayHandler,
+ bufferPool: bufferPool,
}
return l
}
@@ -535,8 +559,8 @@ const minBatchSize = 1000
// size is too low to give stream goroutines a chance to fill it up.
//
// Upon exiting, if the error causing the exit is not an I/O error, run()
-// flushes and closes the underlying connection. Otherwise, the connection is
-// left open to allow the I/O error to be encountered by the reader instead.
+// flushes the underlying connection. The connection is always left open to
+// allow different closing behavior on the client and server.
func (l *loopyWriter) run() (err error) {
defer func() {
if l.logger.V(logLevel) {
@@ -544,7 +568,6 @@ func (l *loopyWriter) run() (err error) {
}
if !isIOError(err) {
l.framer.writer.Flush()
- l.conn.Close()
}
l.cbuf.finish()
}()
@@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
// not be established yet.
delete(l.estdStreams, c.streamID)
str.deleteSelf()
+ for head := str.itl.dequeueAll(); head != nil; head = head.next {
+ if df, ok := head.it.(*dataFrame); ok {
+ _ = df.reader.Close()
+ }
+ }
}
if c.rst { // If RST_STREAM needs to be sent.
if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
@@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) {
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
// A data item is represented by a dataFrame, since it later translates into
// multiple HTTP2 data frames.
- // Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data.
- // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
- // maximum possible HTTP2 frame size.
+ // Every dataFrame has two buffers; h that keeps the grpc-message header and a
+ // reader that holds the actual message. As an optimization to keep wire traffic
+ // low, data from the reader is copied into h to make each frame as big as
+ // possible, up to the maximum HTTP2 frame size.
- if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
+ if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame
// Client sends out empty data frame with endStream = true
if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
return false, err
}
str.itl.dequeue() // remove the empty data item from stream
+ _ = dataItem.reader.Close()
if str.itl.isEmpty() {
str.state = empty
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
@@ -927,9 +957,7 @@ func (l *loopyWriter) processData() (bool, error) {
}
return false, nil
}
- var (
- buf []byte
- )
+
// Figure out the maximum size we can send
maxSize := http2MaxFrameLen
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
@@ -943,43 +971,50 @@ func (l *loopyWriter) processData() (bool, error) {
}
// Compute how much of the header and data we can send within quota and max frame length
hSize := min(maxSize, len(dataItem.h))
- dSize := min(maxSize-hSize, len(dataItem.d))
- if hSize != 0 {
- if dSize == 0 {
- buf = dataItem.h
- } else {
- // We can add some data to grpc message header to distribute bytes more equally across frames.
- // Copy on the stack to avoid generating garbage
- var localBuf [http2MaxFrameLen]byte
- copy(localBuf[:hSize], dataItem.h)
- copy(localBuf[hSize:], dataItem.d[:dSize])
- buf = localBuf[:hSize+dSize]
- }
+ dSize := min(maxSize-hSize, dataItem.reader.Remaining())
+ remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize
+ size := hSize + dSize
+
+ var buf *[]byte
+
+ if hSize != 0 && dSize == 0 {
+ buf = &dataItem.h
} else {
- buf = dataItem.d
- }
+ // Note: this is only necessary because the http2.Framer does not support
+ // partially writing a frame, so the sequence must be materialized into a buffer.
+ // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
+ pool := l.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, stream is
+ // always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ buf = pool.Get(size)
+ defer pool.Put(buf)
- size := hSize + dSize
+ copy((*buf)[:hSize], dataItem.h)
+ _, _ = dataItem.reader.Read((*buf)[hSize:])
+ }
// Now that outgoing flow controls are checked we can replenish str's write quota
str.wq.replenish(size)
var endStream bool
// If this is the last data message on this stream and all of it can be written in this iteration.
- if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
+ if dataItem.endStream && remainingBytes == 0 {
endStream = true
}
if dataItem.onEachWrite != nil {
dataItem.onEachWrite()
}
- if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
+ if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
return false, err
}
str.bytesOutStanding += size
l.sendQuota -= uint32(size)
dataItem.h = dataItem.h[hSize:]
- dataItem.d = dataItem.d[dSize:]
- if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
+ if remainingBytes == 0 { // All the data from that message was written out.
+ _ = dataItem.reader.Close()
str.itl.dequeue()
}
if str.itl.isEmpty() {
@@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) {
}
return false, nil
}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
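With the dataFrame now carrying a mem.Reader instead of a raw byte slice, processData materializes each frame into a buffer borrowed from a mem.BufferPool. A minimal sketch of that borrow/copy/return pattern using the public mem package:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// Borrow a buffer big enough for the frame, fill it, and return it when
	// done, the same way loopyWriter assembles header + message bytes before
	// handing them to WriteData.
	buf := pool.Get(1024)
	defer pool.Put(buf)

	n := copy(*buf, []byte("grpc-message header + payload"))
	fmt.Println(string((*buf)[:n]))
}
```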
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 98f80e3fa..ce878693b 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -24,7 +24,6 @@
package transport
import (
- "bytes"
"context"
"errors"
"fmt"
@@ -35,30 +34,27 @@ import (
"sync"
"time"
- "github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
)
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
// inside an http.Handler, or writes an HTTP error to w and returns an error.
// It requires that the http Server supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
- if r.ProtoMajor != 2 {
- msg := "gRPC requires HTTP/2"
- http.Error(w, msg, http.StatusBadRequest)
- return nil, errors.New(msg)
- }
- if r.Method != "POST" {
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
+ if r.Method != http.MethodPost {
+ w.Header().Set("Allow", http.MethodPost)
msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
- http.Error(w, msg, http.StatusBadRequest)
+ http.Error(w, msg, http.StatusMethodNotAllowed)
return nil, errors.New(msg)
}
contentType := r.Header.Get("Content-Type")
@@ -69,20 +65,40 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
http.Error(w, msg, http.StatusUnsupportedMediaType)
return nil, errors.New(msg)
}
+ if r.ProtoMajor != 2 {
+ msg := "gRPC requires HTTP/2"
+ http.Error(w, msg, http.StatusHTTPVersionNotSupported)
+ return nil, errors.New(msg)
+ }
if _, ok := w.(http.Flusher); !ok {
msg := "gRPC requires a ResponseWriter supporting http.Flusher"
http.Error(w, msg, http.StatusInternalServerError)
return nil, errors.New(msg)
}
+ var localAddr net.Addr
+ if la := r.Context().Value(http.LocalAddrContextKey); la != nil {
+ localAddr, _ = la.(net.Addr)
+ }
+ var authInfo credentials.AuthInfo
+ if r.TLS != nil {
+ authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
+ }
+ p := peer.Peer{
+ Addr: strAddr(r.RemoteAddr),
+ LocalAddr: localAddr,
+ AuthInfo: authInfo,
+ }
st := &serverHandlerTransport{
rw: w,
req: r,
closedCh: make(chan struct{}),
writes: make(chan func()),
+ peer: p,
contentType: contentType,
contentSubtype: contentSubtype,
stats: stats,
+ bufferPool: bufferPool,
}
st.logger = prefixLoggerForServerHandlerTransport(st)
@@ -134,6 +150,8 @@ type serverHandlerTransport struct {
headerMD metadata.MD
+ peer peer.Peer
+
closeOnce sync.Once
closedCh chan struct{} // closed on Close
@@ -154,6 +172,8 @@ type serverHandlerTransport struct {
stats []stats.Handler
logger *grpclog.PrefixLogger
+
+ bufferPool mem.BufferPool
}
func (ht *serverHandlerTransport) Close(err error) {
@@ -165,7 +185,13 @@ func (ht *serverHandlerTransport) Close(err error) {
})
}
-func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
+func (ht *serverHandlerTransport) Peer() *peer.Peer {
+ return &peer.Peer{
+ Addr: ht.peer.Addr,
+ LocalAddr: ht.peer.LocalAddr,
+ AuthInfo: ht.peer.AuthInfo,
+ }
+}
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
// the empty string if unknown.
@@ -220,18 +246,21 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
h.Set("Grpc-Message", encodeGrpcMessage(m))
}
+ s.hdrMu.Lock()
+ defer s.hdrMu.Unlock()
if p := st.Proto(); p != nil && len(p.Details) > 0 {
+ delete(s.trailer, grpcStatusDetailsBinHeader)
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
panic(err)
}
- h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes))
+ h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes))
}
- if md := s.Trailer(); len(md) > 0 {
- for k, vv := range md {
+ if len(s.trailer) > 0 {
+ for k, vv := range s.trailer {
// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
if isReservedHeader(k) {
continue
@@ -287,7 +316,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
}
// writeCustomHeaders sets custom headers set on the stream via SetHeader
-// on the first write call (Write, WriteHeader, or WriteStatus).
+// on the first write call (Write, WriteHeader, or WriteStatus)
func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
h := ht.rw.Header()
@@ -304,16 +333,28 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
s.hdrMu.Unlock()
}
-func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
+ // Always take a reference because otherwise there is no guarantee the data will
+ // be available after this function returns. This is what callers to Write
+ // expect.
+ data.Ref()
headersWritten := s.updateHeaderSent()
- return ht.do(func() {
+ err := ht.do(func() {
+ defer data.Free()
if !headersWritten {
ht.writePendingHeaders(s)
}
ht.rw.Write(hdr)
- ht.rw.Write(data)
+ for _, b := range data {
+ _, _ = ht.rw.Write(b.ReadOnlyData())
+ }
ht.rw.(http.Flusher).Flush()
})
+ if err != nil {
+ data.Free()
+ return err
+ }
+ return nil
}
func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
@@ -344,10 +385,8 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
return err
}
-func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
+func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) {
// With this transport type there will be exactly 1 stream: this HTTP request.
-
- ctx := ht.req.Context()
var cancel context.CancelFunc
if ht.timeoutSet {
ctx, cancel = context.WithTimeout(ctx, ht.timeout)
@@ -367,37 +406,22 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
ht.Close(errors.New("request is done processing"))
}()
+ ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
req := ht.req
-
s := &Stream{
- id: 0, // irrelevant
- requestRead: func(int) {},
- cancel: cancel,
- buf: newRecvBuffer(),
- st: ht,
- method: req.URL.Path,
- recvCompress: req.Header.Get("grpc-encoding"),
- contentSubtype: ht.contentSubtype,
- }
- pr := &peer.Peer{
- Addr: ht.RemoteAddr(),
- }
- if req.TLS != nil {
- pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
- }
- ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
- s.ctx = peer.NewContext(ctx, pr)
- for _, sh := range ht.stats {
- s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
- inHeader := &stats.InHeader{
- FullMethod: s.method,
- RemoteAddr: ht.RemoteAddr(),
- Compression: s.recvCompress,
- }
- sh.HandleRPC(s.ctx, inHeader)
+ id: 0, // irrelevant
+ ctx: ctx,
+ requestRead: func(int) {},
+ cancel: cancel,
+ buf: newRecvBuffer(),
+ st: ht,
+ method: req.URL.Path,
+ recvCompress: req.Header.Get("grpc-encoding"),
+ contentSubtype: ht.contentSubtype,
+ headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
}
s.trReader = &transportReader{
- reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
+ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
windowHandler: func(int) {},
}
@@ -406,21 +430,19 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
go func() {
defer close(readerDone)
- // TODO: minimize garbage, optimize recvBuffer code/ownership
- const readSize = 8196
- for buf := make([]byte, readSize); ; {
- n, err := req.Body.Read(buf)
+ for {
+ buf := ht.bufferPool.Get(http2MaxFrameLen)
+ n, err := req.Body.Read(*buf)
if n > 0 {
- s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
- buf = buf[n:]
+ *buf = (*buf)[:n]
+ s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)})
+ } else {
+ ht.bufferPool.Put(buf)
}
if err != nil {
s.buf.put(recvMsg{err: mapRecvMsgError(err)})
return
}
- if len(buf) == 0 {
- buf = make([]byte, readSize)
- }
}
}()
@@ -453,7 +475,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {}
func (ht *serverHandlerTransport) IncrMsgRecv() {}
-func (ht *serverHandlerTransport) Drain(debugData string) {
+func (ht *serverHandlerTransport) Drain(string) {
panic("Drain() is not implemented")
}
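
The rewritten Write above hands the actual socket writes to the transport's serialized writer goroutine, so it takes an extra reference on the mem.BufferSlice first and releases it either inside the deferred Free or, if enqueueing fails, before returning the error. A minimal, self-contained sketch of that ownership pattern follows; RefCounted and enqueue are hypothetical stand-ins for mem.BufferSlice and ht.do, not gRPC APIs.

package main

import "fmt"

// RefCounted is a hypothetical stand-in for a reference-counted buffer
// such as mem.BufferSlice.
type RefCounted struct{ refs int }

func (r *RefCounted) Ref()  { r.refs++ }
func (r *RefCounted) Free() { r.refs-- }

// enqueue stands in for handing fn to a writer goroutine; it could fail if
// the writer has shut down. Here it simply runs fn synchronously.
func enqueue(fn func()) error {
	fn()
	return nil
}

// write mirrors the shape of the new Write: reference before enqueue,
// release in the closure, release again only on the enqueue-failure path.
func write(data *RefCounted) error {
	data.Ref() // keep the data alive until the deferred write runs
	err := enqueue(func() {
		defer data.Free() // balances the Ref taken above
		// ... copy the buffers to the connection here ...
	})
	if err != nil {
		data.Free() // the closure will never run, so release here instead
		return err
	}
	return nil
}

func main() {
	d := &RefCounted{refs: 1}
	_ = write(d)
	fmt.Println("refs after write:", d.refs) // 1: the temporary ref was released
}
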
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index badab8acf..62b81885d 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -36,6 +36,7 @@ import (
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/channelz"
icredentials "google.golang.org/grpc/internal/credentials"
"google.golang.org/grpc/internal/grpclog"
@@ -43,9 +44,10 @@ import (
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
istatus "google.golang.org/grpc/internal/status"
- "google.golang.org/grpc/internal/syscall"
+ isyscall "google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/internal/transport/networktype"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/resolver"
@@ -58,6 +60,10 @@ import (
// atomically.
var clientConnectionCounter uint64
+var goAwayLoopyWriterTimeout = 5 * time.Second
+
+var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
+
// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct {
lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
@@ -80,9 +86,9 @@ type http2Client struct {
writerDone chan struct{} // sync point to enable testing.
// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
// that the server sent GoAway on this transport.
- goAway chan struct{}
-
- framer *framer
+ goAway chan struct{}
+ keepaliveDone chan struct{} // Closed when the keepalive goroutine exits.
+ framer *framer
// controlBuf delivers all the control related tasks (e.g., window
// updates, reset streams, and various settings) to the controller.
// Do not access controlBuf with mu held.
@@ -111,11 +117,11 @@ type http2Client struct {
streamQuota int64
streamsQuotaAvailable chan struct{}
waitingStreams uint32
- nextID uint32
registeredCompressors string
// Do not access controlBuf with mu held.
mu sync.Mutex // guard the following variables
+ nextID uint32
state transportState
activeStreams map[uint32]*Stream
// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
@@ -137,13 +143,11 @@ type http2Client struct {
// variable.
kpDormant bool
- // Fields below are for channelz metric collection.
- channelzID *channelz.Identifier
- czData *channelzData
+ channelz *channelz.Socket
onClose func(GoAwayReason)
- bufferPool *bufferPool
+ bufferPool mem.BufferPool
connectionID uint64
logger *grpclog.PrefixLogger
@@ -176,7 +180,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error
if networkType == "tcp" && useProxy {
return proxyDial(ctx, address, grpcUA)
}
- return (&net.Dialer{}).DialContext(ctx, networkType, address)
+ return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address)
}
func isTemporary(err error) bool {
@@ -228,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
}(conn)
- // The following defer and goroutine monitor the connectCtx for cancelation
+ // The following defer and goroutine monitor the connectCtx for cancellation
// and deadline. On context expiration, the connection is hard closed and
// this function will naturally fail as a result. Otherwise, the defer
// waits for the goroutine to exit to prevent the context from being
@@ -262,7 +266,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
keepaliveEnabled := false
if kp.Time != infinity {
- if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
+ if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
}
keepaliveEnabled = true
@@ -316,6 +320,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
if opts.MaxHeaderListSize != nil {
maxHeaderListSize = *opts.MaxHeaderListSize
}
+
t := &http2Client{
ctx: ctx,
ctxDone: ctx.Done(), // Cache Done chan.
@@ -330,6 +335,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
readerDone: make(chan struct{}),
writerDone: make(chan struct{}),
goAway: make(chan struct{}),
+ keepaliveDone: make(chan struct{}),
framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
fc: &trInFlow{limit: uint32(icwz)},
scheme: scheme,
@@ -343,11 +349,25 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
maxConcurrentStreams: defaultMaxStreamsClient,
streamQuota: defaultMaxStreamsClient,
streamsQuotaAvailable: make(chan struct{}, 1),
- czData: new(channelzData),
keepaliveEnabled: keepaliveEnabled,
- bufferPool: newBufferPool(),
+ bufferPool: opts.BufferPool,
onClose: onClose,
}
+ var czSecurity credentials.ChannelzSecurityValue
+ if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
+ czSecurity = au.GetSecurityValue()
+ }
+ t.channelz = channelz.RegisterSocket(
+ &channelz.Socket{
+ SocketType: channelz.SocketTypeNormal,
+ Parent: opts.ChannelzParent,
+ SocketMetrics: channelz.SocketMetrics{},
+ EphemeralMetrics: t.socketMetrics,
+ LocalAddr: t.localAddr,
+ RemoteAddr: t.remoteAddr,
+ SocketOptions: channelz.GetSocketOption(t.conn),
+ Security: czSecurity,
+ })
t.logger = prefixLoggerForClientTransport(t)
// Add peer information to the http2client context.
t.ctx = peer.NewContext(t.ctx, t.getPeer())
@@ -378,10 +398,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
sh.HandleConn(t.ctx, connBegin)
}
- t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
- if err != nil {
- return nil, err
- }
if t.keepaliveEnabled {
t.kpDormancyCond = sync.NewCond(&t.mu)
go t.keepalive()
@@ -396,10 +412,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
readerErrCh := make(chan error, 1)
go t.reader(readerErrCh)
defer func() {
- if err == nil {
- err = <-readerErrCh
- }
if err != nil {
+ // writerDone should be closed since the loopy goroutine
+ // wouldn't have started in the case this function returns an error.
+ close(t.writerDone)
t.Close(err)
}
}()
@@ -446,9 +462,19 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
if err := t.framer.writer.Flush(); err != nil {
return nil, err
}
+ // Block until the server preface is received successfully or an error occurs.
+ if err = <-readerErrCh; err != nil {
+ return nil, err
+ }
go func() {
- t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
- t.loopy.run()
+ t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
+ if err := t.loopy.run(); !isIOError(err) {
+ // Immediately close the connection, as the loopy writer returns
+ // when there are no more active streams and we were draining (the
+ // server sent a GOAWAY). For I/O errors, the reader will hit it
+ // after draining any remaining incoming data.
+ t.conn.Close()
+ }
close(t.writerDone)
}()
return t, nil
@@ -482,7 +508,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
closeStream: func(err error) {
t.CloseStream(s, err)
},
- freeBuffer: t.bufferPool.put,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@@ -493,11 +518,24 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
func (t *http2Client) getPeer() *peer.Peer {
return &peer.Peer{
- Addr: t.remoteAddr,
- AuthInfo: t.authInfo, // Can be nil
+ Addr: t.remoteAddr,
+ AuthInfo: t.authInfo, // Can be nil
+ LocalAddr: t.localAddr,
}
}
+// OutgoingGoAwayHandler writes a GOAWAY to the connection. Always returns (false, err) as we want the GoAway
+// to be the last frame loopy writes to the transport.
+func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) {
+ t.mu.Lock()
+ maxStreamID := t.nextID - 2
+ t.mu.Unlock()
+ if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil {
+ return false, err
+ }
+ return false, g.closeConn
+}
+
func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
aud := t.createAudience(callHdr)
ri := credentials.RequestInfo{
@@ -566,7 +604,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
}
- if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+ if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
var k string
for k, vv := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
@@ -736,7 +774,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
hdr := &headerFrame{
hf: headerFields,
endStream: false,
- initStream: func(id uint32) error {
+ initStream: func(uint32) error {
t.mu.Lock()
// TODO: handle transport closure in loopy instead and remove this
// initStream is never called when transport is draining.
@@ -746,8 +784,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
return ErrConnClosing
}
if channelz.IsOn() {
- atomic.AddInt64(&t.czData.streamsStarted, 1)
- atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
+ t.channelz.SocketMetrics.StreamsStarted.Add(1)
+ t.channelz.SocketMetrics.LastLocalStreamCreatedTimestamp.Store(time.Now().UnixNano())
}
// If the keepalive goroutine has gone dormant, wake it up.
if t.kpDormant {
@@ -762,7 +800,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
firstTry := true
var ch chan struct{}
transportDrainRequired := false
- checkForStreamQuota := func(it any) bool {
+ checkForStreamQuota := func() bool {
if t.streamQuota <= 0 { // Can go negative if server decreases it.
if firstTry {
t.waitingStreams++
@@ -774,23 +812,24 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
t.waitingStreams--
}
t.streamQuota--
- h := it.(*headerFrame)
- h.streamID = t.nextID
- t.nextID += 2
- // Drain client transport if nextID > MaxStreamID which signals gRPC that
- // the connection is closed and a new one must be created for subsequent RPCs.
- transportDrainRequired = t.nextID > MaxStreamID
-
- s.id = h.streamID
- s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
t.mu.Lock()
if t.state == draining || t.activeStreams == nil { // Can be niled from Close().
t.mu.Unlock()
return false // Don't create a stream if the transport is already closed.
}
+
+ hdr.streamID = t.nextID
+ t.nextID += 2
+ // Drain client transport if nextID > MaxStreamID which signals gRPC that
+ // the connection is closed and a new one must be created for subsequent RPCs.
+ transportDrainRequired = t.nextID > MaxStreamID
+
+ s.id = hdr.streamID
+ s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
t.activeStreams[s.id] = s
t.mu.Unlock()
+
if t.streamQuota > 0 && t.waitingStreams > 0 {
select {
case t.streamsQuotaAvailable <- struct{}{}:
@@ -800,13 +839,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
return true
}
var hdrListSizeErr error
- checkForHeaderListSize := func(it any) bool {
+ checkForHeaderListSize := func() bool {
if t.maxSendHeaderListSize == nil {
return true
}
- hdrFrame := it.(*headerFrame)
var sz int64
- for _, f := range hdrFrame.hf {
+ for _, f := range hdr.hf {
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
return false
@@ -815,8 +853,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
return true
}
for {
- success, err := t.controlBuf.executeAndPut(func(it any) bool {
- return checkForHeaderListSize(it) && checkForStreamQuota(it)
+ success, err := t.controlBuf.executeAndPut(func() bool {
+ return checkForHeaderListSize() && checkForStreamQuota()
}, hdr)
if err != nil {
// Connection closed.
@@ -918,16 +956,16 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
t.mu.Unlock()
if channelz.IsOn() {
if eosReceived {
- atomic.AddInt64(&t.czData.streamsSucceeded, 1)
+ t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
} else {
- atomic.AddInt64(&t.czData.streamsFailed, 1)
+ t.channelz.SocketMetrics.StreamsFailed.Add(1)
}
}
},
rst: rst,
rstCode: rstCode,
}
- addBackStreamQuota := func(any) bool {
+ addBackStreamQuota := func() bool {
t.streamQuota++
if t.streamQuota > 0 && t.waitingStreams > 0 {
select {
@@ -947,8 +985,9 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
// Close kicks off the shutdown process of the transport. This should be called
// only once on a transport. Once it is called, the transport should not be
-// accessed any more.
+// accessed anymore.
func (t *http2Client) Close(err error) {
+ t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
t.mu.Lock()
// Make sure we only close once.
if t.state == closing {
@@ -971,15 +1010,33 @@ func (t *http2Client) Close(err error) {
// should unblock it so that the goroutine eventually exits.
t.kpDormancyCond.Signal()
}
- t.mu.Unlock()
- t.controlBuf.finish()
- t.cancel()
- t.conn.Close()
- channelz.RemoveEntry(t.channelzID)
// Append info about previous goaways if there were any, since this may be important
// for understanding the root cause for this connection to be closed.
- _, goAwayDebugMessage := t.GetGoAwayReason()
+ goAwayDebugMessage := t.goAwayDebugMessage
+ t.mu.Unlock()
+ // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the
+ // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It
+ // also waits for loopyWriter to be closed with a timer to avoid the
+ // long blocking in case the connection is blackholed, i.e. TCP is
+ // just stuck.
+ t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err})
+ timer := time.NewTimer(goAwayLoopyWriterTimeout)
+ defer timer.Stop()
+ select {
+ case <-t.writerDone: // success
+ case <-timer.C:
+ t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout)
+ }
+ t.cancel()
+ t.conn.Close()
+ // Waits for the reader and keepalive goroutines to exit before returning to
+ // ensure all resources are cleaned up before Close can return.
+ <-t.readerDone
+ if t.keepaliveEnabled {
+ <-t.keepaliveDone
+ }
+ channelz.RemoveEntry(t.channelz.ID)
var st *status.Status
if len(goAwayDebugMessage) > 0 {
st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
@@ -1028,27 +1085,36 @@ func (t *http2Client) GracefulClose() {
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
-func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error {
+ reader := data.Reader()
+
if opts.Last {
// If it's the last message, update stream state.
if !s.compareAndSwapState(streamActive, streamWriteDone) {
+ _ = reader.Close()
return errStreamDone
}
} else if s.getState() != streamActive {
+ _ = reader.Close()
return errStreamDone
}
df := &dataFrame{
streamID: s.id,
endStream: opts.Last,
h: hdr,
- d: data,
+ reader: reader,
}
- if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+ if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota.
+ if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
+ _ = reader.Close()
return err
}
}
- return t.controlBuf.put(df)
+ if err := t.controlBuf.put(df); err != nil {
+ _ = reader.Close()
+ return err
+ }
+ return nil
}
func (t *http2Client) getStream(f http2.Frame) *Stream {
@@ -1080,7 +1146,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) {
// for the transport and the stream based on the current bdp
// estimation.
func (t *http2Client) updateFlowControl(n uint32) {
- updateIWS := func(any) bool {
+ updateIWS := func() bool {
t.initialWindowSize = int32(n)
t.mu.Lock()
for _, s := range t.activeStreams {
@@ -1153,10 +1219,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
- buffer := t.bufferPool.get()
- buffer.Reset()
- buffer.Write(f.Data())
- s.write(recvMsg{buffer: buffer})
+ pool := t.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, stream is
+ // always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
}
}
// The server has closed the stream without sending trailers. Record that
@@ -1185,7 +1254,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
if statusCode == codes.Canceled {
if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
// Our deadline was already exceeded, and that was likely the cause
- // of this cancelation. Alter the status code accordingly.
+ // of this cancellation. Alter the status code accordingly.
statusCode = codes.DeadlineExceeded
}
}
@@ -1233,7 +1302,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
}
updateFuncs = append(updateFuncs, updateStreamQuota)
}
- t.controlBuf.executeAndPut(func(any) bool {
+ t.controlBuf.executeAndPut(func() bool {
for _, f := range updateFuncs {
f()
}
@@ -1254,11 +1323,11 @@ func (t *http2Client) handlePing(f *http2.PingFrame) {
t.controlBuf.put(pingAck)
}
-func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
- return
+ return nil
}
if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
// When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
@@ -1270,8 +1339,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
id := f.LastStreamID
if id > 0 && id%2 == 0 {
t.mu.Unlock()
- t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id))
- return
+ return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)
}
// A client can receive multiple GoAways from the server (see
// https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
@@ -1288,8 +1356,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
// If there are multiple GoAways the first one should always have an ID greater than the following ones.
if id > t.prevGoAwayID {
t.mu.Unlock()
- t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
- return
+ return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)
}
default:
t.setGoAwayReason(f)
@@ -1313,18 +1380,15 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
t.prevGoAwayID = id
if len(t.activeStreams) == 0 {
t.mu.Unlock()
- t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
- return
+ return connectionErrorf(true, nil, "received goaway and there are no active streams")
}
streamsToClose := make([]*Stream, 0)
for streamID, stream := range t.activeStreams {
if streamID > id && streamID <= upperLimit {
// The stream was unprocessed by the server.
- if streamID > id && streamID <= upperLimit {
- atomic.StoreUint32(&stream.unprocessed, 1)
- streamsToClose = append(streamsToClose, stream)
- }
+ atomic.StoreUint32(&stream.unprocessed, 1)
+ streamsToClose = append(streamsToClose, stream)
}
}
t.mu.Unlock()
@@ -1333,6 +1397,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
for _, stream := range streamsToClose {
t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
}
+ return nil
}
// setGoAwayReason sets the value of t.goAwayReason based
@@ -1399,7 +1464,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
mdata = make(map[string][]string)
contentTypeErr = "malformed header: missing HTTP content-type"
grpcMessage string
- statusGen *status.Status
recvCompress string
httpStatusCode *int
httpStatusErr string
@@ -1434,12 +1498,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
rawStatusCode = codes.Code(uint32(code))
case "grpc-message":
grpcMessage = decodeGrpcMessage(hf.Value)
- case "grpc-status-details-bin":
- var err error
- statusGen, err = decodeGRPCStatusDetails(hf.Value)
- if err != nil {
- headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err)
- }
case ":status":
if hf.Value == "200" {
httpStatusErr = ""
@@ -1548,14 +1606,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
return
}
- if statusGen == nil {
- statusGen = status.New(rawStatusCode, grpcMessage)
- }
+ status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
// If client received END_STREAM from server while stream was still active,
// send RST_STREAM.
rstStream := s.getState() == streamActive
- t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true)
+ t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true)
}
// readServerPreface reads and handles the initial settings frame from the
@@ -1577,7 +1633,13 @@ func (t *http2Client) readServerPreface() error {
// network connection. If the server preface is not read successfully, an
// error is pushed to errCh; otherwise errCh is closed with no error.
func (t *http2Client) reader(errCh chan<- error) {
- defer close(t.readerDone)
+ var errClose error
+ defer func() {
+ close(t.readerDone)
+ if errClose != nil {
+ t.Close(errClose)
+ }
+ }()
if err := t.readServerPreface(); err != nil {
errCh <- err
@@ -1616,11 +1678,10 @@ func (t *http2Client) reader(errCh chan<- error) {
t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
}
continue
- } else {
- // Transport error.
- t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
- return
}
+ // Transport error.
+ errClose = connectionErrorf(true, err, "error reading from server: %v", err)
+ return
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
@@ -1634,7 +1695,7 @@ func (t *http2Client) reader(errCh chan<- error) {
case *http2.PingFrame:
t.handlePing(frame)
case *http2.GoAwayFrame:
- t.handleGoAway(frame)
+ errClose = t.handleGoAway(frame)
case *http2.WindowUpdateFrame:
t.handleWindowUpdate(frame)
default:
@@ -1645,15 +1706,15 @@ func (t *http2Client) reader(errCh chan<- error) {
}
}
-func minTime(a, b time.Duration) time.Duration {
- if a < b {
- return a
- }
- return b
-}
-
// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
func (t *http2Client) keepalive() {
+ var err error
+ defer func() {
+ close(t.keepaliveDone)
+ if err != nil {
+ t.Close(err)
+ }
+ }()
p := &ping{data: [8]byte{}}
// True iff a ping has been sent, and no data has been received since then.
outstandingPing := false
@@ -1677,7 +1738,7 @@ func (t *http2Client) keepalive() {
continue
}
if outstandingPing && timeoutLeft <= 0 {
- t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout"))
+ err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")
return
}
t.mu.Lock()
@@ -1709,7 +1770,7 @@ func (t *http2Client) keepalive() {
// keepalive timer expired. In both cases, we need to send a ping.
if !outstandingPing {
if channelz.IsOn() {
- atomic.AddInt64(&t.czData.kpCount, 1)
+ t.channelz.SocketMetrics.KeepAlivesSent.Add(1)
}
t.controlBuf.put(p)
timeoutLeft = t.kp.Timeout
@@ -1719,7 +1780,7 @@ func (t *http2Client) keepalive() {
// timeoutLeft. This will ensure that we wait only for kp.Time
// before sending out the next ping (for cases where the ping is
// acked).
- sleepDuration := minTime(t.kp.Time, timeoutLeft)
+ sleepDuration := min(t.kp.Time, timeoutLeft)
timeoutLeft -= sleepDuration
timer.Reset(sleepDuration)
case <-t.ctx.Done():
@@ -1739,40 +1800,23 @@ func (t *http2Client) GoAway() <-chan struct{} {
return t.goAway
}
-func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
- s := channelz.SocketInternalMetric{
- StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
- StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
- StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
- MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
- MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
- KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
- LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
- LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
- LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
- LocalFlowControlWindow: int64(t.fc.getSize()),
- SocketOptions: channelz.GetSocketOption(t.conn),
- LocalAddr: t.localAddr,
- RemoteAddr: t.remoteAddr,
- // RemoteName :
- }
- if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
- s.Security = au.GetSecurityValue()
- }
- s.RemoteFlowControlWindow = t.getOutFlowWindow()
- return &s
+func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics {
+ return &channelz.EphemeralSocketMetrics{
+ LocalFlowControlWindow: int64(t.fc.getSize()),
+ RemoteFlowControlWindow: t.getOutFlowWindow(),
+ }
}
func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
func (t *http2Client) IncrMsgSent() {
- atomic.AddInt64(&t.czData.msgSent, 1)
- atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
+ t.channelz.SocketMetrics.MessagesSent.Add(1)
+ t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano())
}
func (t *http2Client) IncrMsgRecv() {
- atomic.AddInt64(&t.czData.msgRecv, 1)
- atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
+ t.channelz.SocketMetrics.MessagesReceived.Add(1)
+ t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano())
}
func (t *http2Client) getOutFlowWindow() int64 {
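
Close now enqueues a client-side GOAWAY and gives the loopy writer a bounded window (goAwayLoopyWriterTimeout) to flush it before tearing the connection down, so a blackholed TCP connection cannot stall shutdown indefinitely. Below is a small self-contained sketch of that wait-with-deadline pattern, assuming only a done channel and a time budget; it is illustrative, not the transport's code.

package main

import (
	"fmt"
	"time"
)

// waitForWriter blocks until writerDone is closed or budget elapses,
// reporting whether the writer finished in time.
func waitForWriter(writerDone <-chan struct{}, budget time.Duration) bool {
	timer := time.NewTimer(budget)
	defer timer.Stop()
	select {
	case <-writerDone:
		return true // GOAWAY flushed; proceed with the orderly close
	case <-timer.C:
		return false // connection may be blackholed; close it anyway
	}
}

func main() {
	writerDone := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond) // pretend the loopy writer flushes the GOAWAY
		close(writerDone)
	}()
	fmt.Println("writer finished in time:", waitForWriter(writerDone, 5*time.Second))
}
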
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index c06db679d..584b50fe5 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -25,6 +25,7 @@ import (
"fmt"
"io"
"math"
+ "math/rand"
"net"
"net/http"
"strconv"
@@ -32,18 +33,18 @@ import (
"sync/atomic"
"time"
- "github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/internal/syscall"
+ "google.golang.org/grpc/mem"
+ "google.golang.org/protobuf/proto"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
@@ -68,18 +69,15 @@ var serverConnectionCounter uint64
// http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct {
- lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
- ctx context.Context
- done chan struct{}
- conn net.Conn
- loopy *loopyWriter
- readerDone chan struct{} // sync point to enable testing.
- writerDone chan struct{} // sync point to enable testing.
- remoteAddr net.Addr
- localAddr net.Addr
- authInfo credentials.AuthInfo // auth info about the connection
- inTapHandle tap.ServerInHandle
- framer *framer
+ lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
+ done chan struct{}
+ conn net.Conn
+ loopy *loopyWriter
+ readerDone chan struct{} // sync point to enable testing.
+ loopyWriterDone chan struct{}
+ peer peer.Peer
+ inTapHandle tap.ServerInHandle
+ framer *framer
// The max number of concurrent streams.
maxStreams uint32
// controlBuf delivers all the control related tasks (e.g., window
@@ -121,9 +119,8 @@ type http2Server struct {
idle time.Time
// Fields below are for channelz metric collection.
- channelzID *channelz.Identifier
- czData *channelzData
- bufferPool *bufferPool
+ channelz *channelz.Socket
+ bufferPool mem.BufferPool
connectionID uint64
@@ -243,16 +240,18 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
}
done := make(chan struct{})
+ peer := peer.Peer{
+ Addr: conn.RemoteAddr(),
+ LocalAddr: conn.LocalAddr(),
+ AuthInfo: authInfo,
+ }
t := &http2Server{
- ctx: setConnection(context.Background(), rawConn),
done: done,
conn: conn,
- remoteAddr: conn.RemoteAddr(),
- localAddr: conn.LocalAddr(),
- authInfo: authInfo,
+ peer: peer,
framer: framer,
readerDone: make(chan struct{}),
- writerDone: make(chan struct{}),
+ loopyWriterDone: make(chan struct{}),
maxStreams: config.MaxStreams,
inTapHandle: config.InTapHandle,
fc: &trInFlow{limit: uint32(icwz)},
@@ -263,12 +262,25 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
idle: time.Now(),
kep: kep,
initialWindowSize: iwz,
- czData: new(channelzData),
- bufferPool: newBufferPool(),
- }
+ bufferPool: config.BufferPool,
+ }
+ var czSecurity credentials.ChannelzSecurityValue
+ if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
+ czSecurity = au.GetSecurityValue()
+ }
+ t.channelz = channelz.RegisterSocket(
+ &channelz.Socket{
+ SocketType: channelz.SocketTypeNormal,
+ Parent: config.ChannelzParent,
+ SocketMetrics: channelz.SocketMetrics{},
+ EphemeralMetrics: t.socketMetrics,
+ LocalAddr: t.peer.LocalAddr,
+ RemoteAddr: t.peer.Addr,
+ SocketOptions: channelz.GetSocketOption(t.conn),
+ Security: czSecurity,
+ },
+ )
t.logger = prefixLoggerForServerTransport(t)
- // Add peer information to the http2server context.
- t.ctx = peer.NewContext(t.ctx, t.getPeer())
t.controlBuf = newControlBuffer(t.done)
if dynamicWindow {
@@ -277,18 +289,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
updateFlowControl: t.updateFlowControl,
}
}
- for _, sh := range t.stats {
- t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- })
- connBegin := &stats.ConnBegin{}
- sh.HandleConn(t.ctx, connBegin)
- }
- t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
- if err != nil {
- return nil, err
- }
t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
t.framer.writer.Flush()
@@ -331,10 +331,27 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
t.handleSettings(sf)
go func() {
- t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger)
- t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
- t.loopy.run()
- close(t.writerDone)
+ t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
+ err := t.loopy.run()
+ close(t.loopyWriterDone)
+ if !isIOError(err) {
+ // Close the connection if a non-I/O error occurs (for I/O errors
+ // the reader will also encounter the error and close). Wait 1
+ // second before closing the connection, or when the reader is done
+ // (i.e. the client already closed the connection or a connection
+ // error occurred). This avoids the potential problem where there
+ // is unread data on the receive side of the connection, which, if
+ // closed, would lead to a TCP RST instead of FIN, and the client
+ // encountering errors. For more info:
+ // https://github.com/grpc/grpc-go/issues/5358
+ timer := time.NewTimer(time.Second)
+ defer timer.Stop()
+ select {
+ case <-t.readerDone:
+ case <-timer.C:
+ }
+ t.conn.Close()
+ }
}()
go t.keepalive()
return t, nil
@@ -342,7 +359,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
// operateHeaders takes action on the decoded headers. Returns an error if fatal
// error encountered and transport needs to close, otherwise returns nil.
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error {
+func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error {
// Acquire max stream ID lock for entire duration
t.maxStreamMu.Lock()
defer t.maxStreamMu.Unlock()
@@ -369,10 +386,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
buf := newRecvBuffer()
s := &Stream{
- id: streamID,
- st: t,
- buf: buf,
- fc: &inFlow{limit: uint32(t.initialWindowSize)},
+ id: streamID,
+ st: t,
+ buf: buf,
+ fc: &inFlow{limit: uint32(t.initialWindowSize)},
+ headerWireLength: int(frame.Header().Length),
}
var (
// if false, content-type was missing or invalid
@@ -511,9 +529,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
s.state = streamReadDone
}
if timeoutSet {
- s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout)
+ s.ctx, s.cancel = context.WithTimeout(ctx, timeout)
} else {
- s.ctx, s.cancel = context.WithCancel(t.ctx)
+ s.ctx, s.cancel = context.WithCancel(ctx)
}
// Attach the received metadata to the context.
@@ -561,7 +579,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
if t.inTapHandle != nil {
var err error
- if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil {
+ if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil {
t.mu.Unlock()
if t.logger.V(logLevel) {
t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err)
@@ -586,33 +604,19 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
t.mu.Unlock()
if channelz.IsOn() {
- atomic.AddInt64(&t.czData.streamsStarted, 1)
- atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
+ t.channelz.SocketMetrics.StreamsStarted.Add(1)
+ t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano())
}
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
}
- s.ctx = traceCtx(s.ctx, s.method)
- for _, sh := range t.stats {
- s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
- inHeader := &stats.InHeader{
- FullMethod: s.method,
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- Compression: s.recvCompress,
- WireLength: int(frame.Header().Length),
- Header: mdata.Copy(),
- }
- sh.HandleRPC(s.ctx, inHeader)
- }
s.ctxDone = s.ctx.Done()
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
s.trReader = &transportReader{
reader: &recvBufferReader{
- ctx: s.ctx,
- ctxDone: s.ctxDone,
- recv: s.buf,
- freeBuffer: t.bufferPool.put,
+ ctx: s.ctx,
+ ctxDone: s.ctxDone,
+ recv: s.buf,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@@ -630,8 +634,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
// HandleStreams receives incoming streams using the given handler. This is
// typically run in a separate goroutine.
// traceCtx attaches trace to ctx and returns the new context.
-func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
- defer close(t.readerDone)
+func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) {
+ defer func() {
+ close(t.readerDone)
+ <-t.loopyWriterDone
+ }()
for {
t.controlBuf.throttle()
frame, err := t.framer.fr.ReadFrame()
@@ -656,18 +663,20 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.
}
continue
}
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- t.Close(err)
- return
- }
t.Close(err)
return
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
- if err := t.operateHeaders(frame, handle, traceCtx); err != nil {
- t.Close(err)
- break
+ if err := t.operateHeaders(ctx, frame, handle); err != nil {
+ // Any error processing client headers, e.g. invalid stream ID,
+ // is considered a protocol violation.
+ t.controlBuf.put(&goAway{
+ code: http2.ErrCodeProtocol,
+ debugData: []byte(err.Error()),
+ closeConn: err,
+ })
+ continue
}
case *http2.DataFrame:
t.handleData(frame)
@@ -804,10 +813,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
- buffer := t.bufferPool.get()
- buffer.Reset()
- buffer.Write(f.Data())
- s.write(recvMsg{buffer: buffer})
+ pool := t.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, stream is
+ // always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
}
}
if f.StreamEnded() {
@@ -850,7 +862,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
}
return nil
})
- t.controlBuf.executeAndPut(func(any) bool {
+ t.controlBuf.executeAndPut(func() bool {
for _, f := range updateFuncs {
f()
}
@@ -980,7 +992,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
}
}
if err := t.writeHeaderLocked(s); err != nil {
- return status.Convert(err).Err()
+ switch e := err.(type) {
+ case ConnectionError:
+ return status.Error(codes.Unavailable, e.Desc)
+ default:
+ return status.Convert(err).Err()
+ }
}
return nil
}
@@ -999,12 +1016,13 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
}
headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
- success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
+ hf := &headerFrame{
streamID: s.id,
hf: headerFields,
endStream: false,
onWrite: t.setResetPingStrikes,
- })
+ }
+ success, err := t.controlBuf.executeAndPut(func() bool { return t.checkForHeaderListSize(hf) }, hf)
if !success {
if err != nil {
return err
@@ -1053,12 +1071,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
if p := st.Proto(); p != nil && len(p.Details) > 0 {
+ // Do not use the user's grpc-status-details-bin (if present) if we are
+ // even attempting to set our own.
+ delete(s.trailer, grpcStatusDetailsBinHeader)
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
} else {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)})
}
}
@@ -1071,7 +1092,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
onWrite: t.setResetPingStrikes,
}
- success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
+ success, err := t.controlBuf.executeAndPut(func() bool {
+ return t.checkForHeaderListSize(trailingHeader)
+ }, nil)
if !success {
if err != nil {
return err
@@ -1094,27 +1117,37 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
// Write converts the data into HTTP2 data frame and sends it out. Non-nil error
// is returns if it fails (e.g., framing error, transport error).
-func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
+ reader := data.Reader()
+
if !s.isHeaderSent() { // Headers haven't been written yet.
if err := t.WriteHeader(s, nil); err != nil {
+ _ = reader.Close()
return err
}
} else {
// Writing headers checks for this condition.
if s.getState() == streamDone {
+ _ = reader.Close()
return t.streamContextErr(s)
}
}
+
df := &dataFrame{
streamID: s.id,
h: hdr,
- d: data,
+ reader: reader,
onEachWrite: t.setResetPingStrikes,
}
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+ if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
+ _ = reader.Close()
return t.streamContextErr(s)
}
- return t.controlBuf.put(df)
+ if err := t.controlBuf.put(df); err != nil {
+ _ = reader.Close()
+ return err
+ }
+ return nil
}
// keepalive running in a separate goroutine does the following:
@@ -1190,12 +1223,12 @@ func (t *http2Server) keepalive() {
continue
}
if outstandingPing && kpTimeoutLeft <= 0 {
- t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time))
+ t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Timeout))
return
}
if !outstandingPing {
if channelz.IsOn() {
- atomic.AddInt64(&t.czData.kpCount, 1)
+ t.channelz.SocketMetrics.KeepAlivesSent.Add(1)
}
t.controlBuf.put(p)
kpTimeoutLeft = t.kp.Timeout
@@ -1205,7 +1238,7 @@ func (t *http2Server) keepalive() {
// timeoutLeft. This will ensure that we wait only for kp.Time
// before sending out the next ping (for cases where the ping is
// acked).
- sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
+ sleepDuration := min(t.kp.Time, kpTimeoutLeft)
kpTimeoutLeft -= sleepDuration
kpTimer.Reset(sleepDuration)
case <-t.done:
@@ -1235,15 +1268,11 @@ func (t *http2Server) Close(err error) {
if err := t.conn.Close(); err != nil && t.logger.V(logLevel) {
t.logger.Infof("Error closing underlying net.Conn during Close: %v", err)
}
- channelz.RemoveEntry(t.channelzID)
+ channelz.RemoveEntry(t.channelz.ID)
// Cancel all active streams.
for _, s := range streams {
s.cancel()
}
- for _, sh := range t.stats {
- connEnd := &stats.ConnEnd{}
- sh.HandleConn(t.ctx, connEnd)
- }
}
// deleteStream deletes the stream s from transport's active streams.
@@ -1260,9 +1289,9 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
if channelz.IsOn() {
if eosReceived {
- atomic.AddInt64(&t.czData.streamsSucceeded, 1)
+ t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
} else {
- atomic.AddInt64(&t.czData.streamsFailed, 1)
+ t.channelz.SocketMetrics.StreamsFailed.Add(1)
}
}
}
@@ -1309,10 +1338,6 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eo
})
}
-func (t *http2Server) RemoteAddr() net.Addr {
- return t.remoteAddr
-}
-
func (t *http2Server) Drain(debugData string) {
t.mu.Lock()
defer t.mu.Unlock()
@@ -1349,6 +1374,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
return false, err
}
+ t.framer.writer.Flush()
if retErr != nil {
return false, retErr
}
@@ -1369,7 +1395,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
return false, err
}
go func() {
- timer := time.NewTimer(time.Minute)
+ timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case <-t.drainEvent.Done():
@@ -1382,38 +1408,21 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
return false, nil
}
-func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
- s := channelz.SocketInternalMetric{
- StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
- StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
- StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
- MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
- MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
- KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
- LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
- LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
- LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
- LocalFlowControlWindow: int64(t.fc.getSize()),
- SocketOptions: channelz.GetSocketOption(t.conn),
- LocalAddr: t.localAddr,
- RemoteAddr: t.remoteAddr,
- // RemoteName :
- }
- if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
- s.Security = au.GetSecurityValue()
- }
- s.RemoteFlowControlWindow = t.getOutFlowWindow()
- return &s
+func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics {
+ return &channelz.EphemeralSocketMetrics{
+ LocalFlowControlWindow: int64(t.fc.getSize()),
+ RemoteFlowControlWindow: t.getOutFlowWindow(),
+ }
}
func (t *http2Server) IncrMsgSent() {
- atomic.AddInt64(&t.czData.msgSent, 1)
- atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
+ t.channelz.SocketMetrics.MessagesSent.Add(1)
+ t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1)
}
func (t *http2Server) IncrMsgRecv() {
- atomic.AddInt64(&t.czData.msgRecv, 1)
- atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
+ t.channelz.SocketMetrics.MessagesReceived.Add(1)
+ t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1)
}
func (t *http2Server) getOutFlowWindow() int64 {
@@ -1431,10 +1440,12 @@ func (t *http2Server) getOutFlowWindow() int64 {
}
}
-func (t *http2Server) getPeer() *peer.Peer {
+// Peer returns the peer of the transport.
+func (t *http2Server) Peer() *peer.Peer {
return &peer.Peer{
- Addr: t.remoteAddr,
- AuthInfo: t.authInfo, // Can be nil
+ Addr: t.peer.Addr,
+ LocalAddr: t.peer.LocalAddr,
+ AuthInfo: t.peer.AuthInfo, // Can be nil
}
}
@@ -1444,7 +1455,7 @@ func getJitter(v time.Duration) time.Duration {
}
// Generate a jitter between +/- 10% of the value.
r := int64(v / 10)
- j := grpcrand.Int63n(2*r) - r
+ j := rand.Int63n(2*r) - r
return time.Duration(j)
}
@@ -1459,6 +1470,6 @@ func GetConnection(ctx context.Context) net.Conn {
// SetConnection adds the connection to the context to be able to get
// information about the destination ip and port for an incoming RPC. This also
// allows any unary or streaming interceptors to see the connection.
-func setConnection(ctx context.Context, conn net.Conn) context.Context {
+func SetConnection(ctx context.Context, conn net.Conn) context.Context {
return context.WithValue(ctx, connectionKey{}, conn)
}
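
getJitter above now draws from math/rand directly instead of the removed internal grpcrand package; the result is still a uniform offset within roughly ±10% of the base interval. A standalone sketch of that calculation, with the infinity special case omitted and an illustrative base value:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitter returns a uniformly random offset in [-v/10, v/10), mirroring the
// getJitter logic in the diff without the infinity special case.
func jitter(v time.Duration) time.Duration {
	r := int64(v / 10)
	return time.Duration(rand.Int63n(2*r) - r)
}

func main() {
	base := 2 * time.Hour // e.g. a MaxConnectionAge-style keepalive base
	fmt.Println("next deadline:", base+jitter(base))
}
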
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 195814008..3613d7b64 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -34,12 +34,9 @@ import (
"time"
"unicode/utf8"
- "github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
- spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
)
const (
@@ -88,6 +85,8 @@ var (
}
)
+var grpcStatusDetailsBinHeader = "grpc-status-details-bin"
+
// isReservedHeader checks whether hdr belongs to HTTP2 headers
// reserved by gRPC protocol. Any other headers are classified as the
// user-specified metadata.
@@ -103,7 +102,6 @@ func isReservedHeader(hdr string) bool {
"grpc-message",
"grpc-status",
"grpc-timeout",
- "grpc-status-details-bin",
// Intentionally exclude grpc-previous-rpc-attempts and
// grpc-retry-pushback-ms, which are "reserved", but their API
// intentionally works via metadata.
@@ -154,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) {
return v, nil
}
-func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) {
- v, err := decodeBinHeader(rawDetails)
- if err != nil {
- return nil, err
- }
- st := &spb.Status{}
- if err = proto.Unmarshal(v, st); err != nil {
- return nil, err
- }
- return status.FromProto(st), nil
-}
-
type timeoutUnit uint8
const (
@@ -331,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
return w
}
-func (w *bufWriter) Write(b []byte) (n int, err error) {
+func (w *bufWriter) Write(b []byte) (int, error) {
if w.err != nil {
return 0, w.err
}
if w.batchSize == 0 { // Buffer has been disabled.
- n, err = w.conn.Write(b)
+ n, err := w.conn.Write(b)
return n, toIOError(err)
}
if w.buf == nil {
b := w.pool.Get().(*[]byte)
w.buf = *b
}
+ written := 0
for len(b) > 0 {
- nn := copy(w.buf[w.offset:], b)
- b = b[nn:]
- w.offset += nn
- n += nn
- if w.offset >= w.batchSize {
- err = w.flushKeepBuffer()
+ copied := copy(w.buf[w.offset:], b)
+ b = b[copied:]
+ written += copied
+ w.offset += copied
+ if w.offset < w.batchSize {
+ continue
+ }
+ if err := w.flushKeepBuffer(); err != nil {
+ return written, err
}
}
- return n, err
+ return written, nil
}
func (w *bufWriter) Flush() error {
@@ -403,7 +393,7 @@ type framer struct {
fr *http2.Framer
}
-var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool)
+var writeBufferPoolMap = make(map[int]*sync.Pool)
var writeBufferMutex sync.Mutex
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
@@ -432,10 +422,9 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu
return f
}
-func getWriteBufferPool(writeBufferSize int) *sync.Pool {
+func getWriteBufferPool(size int) *sync.Pool {
writeBufferMutex.Lock()
defer writeBufferMutex.Unlock()
- size := writeBufferSize * 2
pool, ok := writeBufferPoolMap[size]
if ok {
return pool
diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
index 415961987..54b224436 100644
--- a/vendor/google.golang.org/grpc/internal/transport/proxy.go
+++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -28,6 +28,8 @@ import (
"net/http"
"net/http/httputil"
"net/url"
+
+ "google.golang.org/grpc/internal"
)
const proxyAuthHeaderKey = "Proxy-Authorization"
@@ -105,14 +107,20 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri
}
return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
}
-
- return &bufConn{Conn: conn, r: r}, nil
+ // The buffer could contain extra bytes from the target server, so we can't
+ // discard it. However, in many cases where the server waits for the client
+ // to send the first message (e.g. when TLS is being used), the buffer will
+ // be empty, so we can avoid the overhead of reading through this buffer.
+ if r.Buffered() != 0 {
+ return &bufConn{Conn: conn, r: r}, nil
+ }
+ return conn, nil
}
// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy
// is necessary, dials, does the HTTP CONNECT handshake, and returns the
// connection.
-func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) {
+func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) {
newAddr := addr
proxyURL, err := mapAddress(addr)
if err != nil {
@@ -122,15 +130,15 @@ func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn,
newAddr = proxyURL.Host
}
- conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr)
+ conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr)
if err != nil {
- return
+ return nil, err
}
- if proxyURL != nil {
+ if proxyURL == nil {
// proxy is disabled if proxyURL is nil.
- conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
+ return conn, err
}
- return
+ return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA)
}
func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 74a811fc0..e12cb0bc9 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -22,12 +22,12 @@
package transport
import (
- "bytes"
"context"
"errors"
"fmt"
"io"
"net"
+ "strings"
"sync"
"sync/atomic"
"time"
@@ -36,7 +36,9 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
@@ -45,32 +47,10 @@ import (
const logLevel = 2
-type bufferPool struct {
- pool sync.Pool
-}
-
-func newBufferPool() *bufferPool {
- return &bufferPool{
- pool: sync.Pool{
- New: func() any {
- return new(bytes.Buffer)
- },
- },
- }
-}
-
-func (p *bufferPool) get() *bytes.Buffer {
- return p.pool.Get().(*bytes.Buffer)
-}
-
-func (p *bufferPool) put(b *bytes.Buffer) {
- p.pool.Put(b)
-}
-
// recvMsg represents the received msg from the transport. All transport
// protocol specific info has been removed.
type recvMsg struct {
- buffer *bytes.Buffer
+ buffer mem.Buffer
// nil: received some data
// io.EOF: stream is completed. data is nil.
// other non-nil error: transport failure. data is nil.
@@ -100,6 +80,9 @@ func newRecvBuffer() *recvBuffer {
func (b *recvBuffer) put(r recvMsg) {
b.mu.Lock()
if b.err != nil {
+ // drop the buffer on the floor. Since b.err is not nil, any subsequent reads
+ // will always return an error, making this buffer inaccessible.
+ r.buffer.Free()
b.mu.Unlock()
// An error had occurred earlier, don't accept more
// data or errors.
@@ -146,45 +129,70 @@ type recvBufferReader struct {
ctx context.Context
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
recv *recvBuffer
- last *bytes.Buffer // Stores the remaining data in the previous calls.
+ last mem.Buffer // Stores the remaining data in the previous calls.
err error
- freeBuffer func(*bytes.Buffer)
}
-// Read reads the next len(p) bytes from last. If last is drained, it tries to
-// read additional data from recv. It blocks if there no additional data available
-// in recv. If Read returns any non-nil error, it will continue to return that error.
-func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) {
if r.err != nil {
return 0, r.err
}
if r.last != nil {
- // Read remaining data left in last call.
- copied, _ := r.last.Read(p)
- if r.last.Len() == 0 {
- r.freeBuffer(r.last)
+ n, r.last = mem.ReadUnsafe(header, r.last)
+ return n, nil
+ }
+ if r.closeStream != nil {
+ n, r.err = r.readHeaderClient(header)
+ } else {
+ n, r.err = r.readHeader(header)
+ }
+ return n, r.err
+}
+
+// Read reads the next n bytes from last. If last is drained, it tries to read
+// additional data from recv. It blocks if there is no additional data available
+// in recv. If Read returns any non-nil error, it will continue to return that
+// error.
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.last != nil {
+ buf = r.last
+ if r.last.Len() > n {
+ buf, r.last = mem.SplitUnsafe(buf, n)
+ } else {
r.last = nil
}
- return copied, nil
+ return buf, nil
}
if r.closeStream != nil {
- n, r.err = r.readClient(p)
+ buf, r.err = r.readClient(n)
} else {
- n, r.err = r.read(p)
+ buf, r.err = r.read(n)
}
- return n, r.err
+ return buf, r.err
}
-func (r *recvBufferReader) read(p []byte) (n int, err error) {
+func (r *recvBufferReader) readHeader(header []byte) (n int, err error) {
select {
case <-r.ctxDone:
return 0, ContextErr(r.ctx.Err())
case m := <-r.recv.get():
- return r.readAdditional(m, p)
+ return r.readHeaderAdditional(m, header)
}
}
-func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
+func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) {
+ select {
+ case <-r.ctxDone:
+ return nil, ContextErr(r.ctx.Err())
+ case m := <-r.recv.get():
+ return r.readAdditional(m, n)
+ }
+}
+
+func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) {
// If the context is canceled, then closes the stream with nil metadata.
// closeStream writes its error parameter to r.recv as a recvMsg.
// r.readAdditional acts on that message and returns the necessary error.
@@ -205,25 +213,67 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
// faster.
r.closeStream(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
- return r.readAdditional(m, p)
+ return r.readHeaderAdditional(m, header)
case m := <-r.recv.get():
- return r.readAdditional(m, p)
+ return r.readHeaderAdditional(m, header)
}
}
-func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
+func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
+ // If the context is canceled, then closes the stream with nil metadata.
+ // closeStream writes its error parameter to r.recv as a recvMsg.
+ // r.readAdditional acts on that message and returns the necessary error.
+ select {
+ case <-r.ctxDone:
+ // Note that this adds the ctx error to the end of recv buffer, and
+ // reads from the head. This will delay the error until recv buffer is
+ // empty, thus will delay ctx cancellation in Recv().
+ //
+ // It's done this way to fix a race between ctx cancel and trailer. The
+ // race was, stream.Recv() may return ctx error if ctxDone wins the
+ // race, but stream.Trailer() may return a non-nil md because the stream
+ // was not marked as done when trailer is received. This closeStream
+ // call will mark stream as done, thus fix the race.
+ //
+ // TODO: delaying ctx error seems like an unnecessary side effect. What
+ // we really want is to mark the stream as done, and return ctx error
+ // faster.
+ r.closeStream(ContextErr(r.ctx.Err()))
+ m := <-r.recv.get()
+ return r.readAdditional(m, n)
+ case m := <-r.recv.get():
+ return r.readAdditional(m, n)
+ }
+}
+
+func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) {
r.recv.load()
if m.err != nil {
+ if m.buffer != nil {
+ m.buffer.Free()
+ }
return 0, m.err
}
- copied, _ := m.buffer.Read(p)
- if m.buffer.Len() == 0 {
- r.freeBuffer(m.buffer)
- r.last = nil
- } else {
- r.last = m.buffer
+
+ n, r.last = mem.ReadUnsafe(header, m.buffer)
+
+ return n, nil
+}
+
+func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) {
+ r.recv.load()
+ if m.err != nil {
+ if m.buffer != nil {
+ m.buffer.Free()
+ }
+ return nil, m.err
}
- return copied, nil
+
+ if m.buffer.Len() > n {
+ m.buffer, r.last = mem.SplitUnsafe(m.buffer, n)
+ }
+
+ return m.buffer, nil
}
type streamState uint32
@@ -239,7 +289,7 @@ const (
type Stream struct {
id uint32
st ServerTransport // nil for client side Stream
- ct *http2Client // nil for server side Stream
+ ct ClientTransport // nil for server side Stream
ctx context.Context // the associated context of the stream
cancel context.CancelFunc // always nil for client side Stream
done chan struct{} // closed at the end of stream to unblock writers. On the client side.
@@ -249,7 +299,7 @@ type Stream struct {
recvCompress string
sendCompress string
buf *recvBuffer
- trReader io.Reader
+ trReader *transportReader
fc *inFlow
wq *writeQuota
@@ -265,7 +315,8 @@ type Stream struct {
// headerValid indicates whether a valid header was received. Only
// meaningful after headerChan is closed (always call waitOnHeader() before
// reading its value). Not valid on server side.
- headerValid bool
+ headerValid bool
+ headerWireLength int // Only set on server side.
// hdrMu protects header and trailer metadata on the server-side.
hdrMu sync.Mutex
@@ -301,7 +352,7 @@ func (s *Stream) isHeaderSent() bool {
}
// updateHeaderSent updates headerSent and returns true
-// if it was alreay set. It is valid only on server-side.
+// if it was already set. It is valid only on server-side.
func (s *Stream) updateHeaderSent() bool {
return atomic.SwapUint32(&s.headerSent, 1) == 1
}
@@ -360,8 +411,12 @@ func (s *Stream) SendCompress() string {
// ClientAdvertisedCompressors returns the compressor names advertised by the
// client via grpc-accept-encoding header.
-func (s *Stream) ClientAdvertisedCompressors() string {
- return s.clientAdvertisedCompressors
+func (s *Stream) ClientAdvertisedCompressors() []string {
+ values := strings.Split(s.clientAdvertisedCompressors, ",")
+ for i, v := range values {
+ values[i] = strings.TrimSpace(v)
+ }
+ return values
}
// Done returns a channel which is closed when it receives the final status
@@ -401,7 +456,7 @@ func (s *Stream) TrailersOnly() bool {
return s.noHeaders
}
-// Trailer returns the cached trailer metedata. Note that if it is not called
+// Trailer returns the cached trailer metadata. Note that if it is not called
// after the entire stream is done, it could return an empty MD. Client
// side only.
// It can be safely read only after stream has ended that is either read
@@ -425,6 +480,12 @@ func (s *Stream) Context() context.Context {
return s.ctx
}
+// SetContext sets the context of the stream. This will be deleted once the
+// stats handler callouts all move to gRPC layer.
+func (s *Stream) SetContext(ctx context.Context) {
+ s.ctx = ctx
+}
+
// Method returns the method for the stream.
func (s *Stream) Method() string {
return s.method
@@ -437,6 +498,12 @@ func (s *Stream) Status() *status.Status {
return s.status
}
+// HeaderWireLength returns the size of the headers of the stream as received
+// from the wire. Valid only on the server.
+func (s *Stream) HeaderWireLength() int {
+ return s.headerWireLength
+}
+
// SetHeader sets the header metadata. This can be called multiple times.
// Server side only.
// This should not be called in parallel to other data writes.
@@ -480,36 +547,96 @@ func (s *Stream) write(m recvMsg) {
s.buf.put(m)
}
-// Read reads all p bytes from the wire for this stream.
-func (s *Stream) Read(p []byte) (n int, err error) {
+// ReadHeader reads data into the provided header slice from the stream. It
+// first checks if there was an error during a previous read operation and
+// returns it if present. It then requests a read operation for the length of
+// the header. It continues to read from the stream until the entire header
+// slice is filled or an error occurs. If an `io.EOF` error is encountered
+// with partially read data, it is converted to `io.ErrUnexpectedEOF` to
+// indicate an unexpected end of the stream. The method returns any error
+// encountered during the read process or nil if the header was successfully
+// read.
+func (s *Stream) ReadHeader(header []byte) (err error) {
// Don't request a read if there was an error earlier
- if er := s.trReader.(*transportReader).er; er != nil {
- return 0, er
+ if er := s.trReader.er; er != nil {
+ return er
}
- s.requestRead(len(p))
- return io.ReadFull(s.trReader, p)
+ s.requestRead(len(header))
+ for len(header) != 0 {
+ n, err := s.trReader.ReadHeader(header)
+ header = header[n:]
+ if len(header) == 0 {
+ err = nil
+ }
+ if err != nil {
+ if n > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+ }
+ }
+ return nil
+}
+
+// Read reads n bytes from the wire for this stream.
+func (s *Stream) Read(n int) (data mem.BufferSlice, err error) {
+ // Don't request a read if there was an error earlier
+ if er := s.trReader.er; er != nil {
+ return nil, er
+ }
+ s.requestRead(n)
+ for n != 0 {
+ buf, err := s.trReader.Read(n)
+ var bufLen int
+ if buf != nil {
+ bufLen = buf.Len()
+ }
+ n -= bufLen
+ if n == 0 {
+ err = nil
+ }
+ if err != nil {
+ if bufLen > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ data.Free()
+ return nil, err
+ }
+ data = append(data, buf)
+ }
+ return data, nil
}
-// tranportReader reads all the data available for this Stream from the transport and
+// transportReader reads all the data available for this Stream from the transport and
// passes them into the decoder, which converts them into a gRPC message stream.
// The error is io.EOF when the stream is done or another non-nil error if
// the stream broke.
type transportReader struct {
- reader io.Reader
+ reader *recvBufferReader
// The handler to control the window update procedure for both this
// particular stream and the associated transport.
windowHandler func(int)
er error
}
-func (t *transportReader) Read(p []byte) (n int, err error) {
- n, err = t.reader.Read(p)
+func (t *transportReader) ReadHeader(header []byte) (int, error) {
+ n, err := t.reader.ReadHeader(header)
if err != nil {
t.er = err
- return
+ return 0, err
}
t.windowHandler(n)
- return
+ return n, nil
+}
+
+func (t *transportReader) Read(n int) (mem.Buffer, error) {
+ buf, err := t.reader.Read(n)
+ if err != nil {
+ t.er = err
+ return buf, err
+ }
+ t.windowHandler(buf.Len())
+ return buf, nil
}
// BytesReceived indicates whether any bytes have been received on this stream.
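
Illustrative sketch (not part of the vendored diff): how code inside the transport package might drive the new ReadHeader/Read pair to pull one length-prefixed gRPC message off a Stream. It assumes the standard 5-byte message prefix (one compression-flag byte followed by a 4-byte big-endian length), an already-established *Stream, and an encoding/binary import; readOneMessage is a hypothetical helper, not an API added by this change.

func readOneMessage(s *Stream) ([]byte, error) {
	hdr := make([]byte, 5)
	if err := s.ReadHeader(hdr); err != nil {
		return nil, err
	}
	msgLen := int(binary.BigEndian.Uint32(hdr[1:]))
	data, err := s.Read(msgLen) // mem.BufferSlice backed by pooled buffers
	if err != nil {
		return nil, err
	}
	defer data.Free()              // return the buffers to the pool
	return data.Materialize(), nil // copy into one contiguous []byte
}
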
@@ -552,9 +679,10 @@ type ServerConfig struct {
WriteBufferSize int
ReadBufferSize int
SharedWriteBuffer bool
- ChannelzParentID *channelz.Identifier
+ ChannelzParent *channelz.Server
MaxHeaderListSize *uint32
HeaderTableSize *uint32
+ BufferPool mem.BufferPool
}
// ConnectOptions covers all relevant options for communicating with the server.
@@ -587,12 +715,14 @@ type ConnectOptions struct {
ReadBufferSize int
// SharedWriteBuffer indicates whether connections should reuse write buffer
SharedWriteBuffer bool
- // ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
- ChannelzParentID *channelz.Identifier
+ // ChannelzParent sets the addrConn id which initiated the creation of this client transport.
+ ChannelzParent *channelz.SubChannel
// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
MaxHeaderListSize *uint32
// UseProxy specifies if a proxy should be used.
UseProxy bool
+ // The mem.BufferPool to use when reading/writing to the wire.
+ BufferPool mem.BufferPool
}
// NewClientTransport establishes the transport with the required ConnectOptions
@@ -654,7 +784,7 @@ type ClientTransport interface {
// Write sends the data for the given stream. A nil stream indicates
// the write is to be performed on the transport as a whole.
- Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+ Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
// NewStream creates a Stream for an RPC.
NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
@@ -698,7 +828,7 @@ type ClientTransport interface {
// Write methods for a given Stream will be called serially.
type ServerTransport interface {
// HandleStreams receives incoming streams using the given handler.
- HandleStreams(func(*Stream), func(context.Context, string) context.Context)
+ HandleStreams(context.Context, func(*Stream))
// WriteHeader sends the header metadata for the given stream.
// WriteHeader may not be called on all streams.
@@ -706,7 +836,7 @@ type ServerTransport interface {
// Write sends the data for the given stream.
// Write may not be called on all streams.
- Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+ Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
// WriteStatus sends the status of a stream to the client. WriteStatus is
// the final call made on a stream and always occurs.
@@ -717,8 +847,8 @@ type ServerTransport interface {
// handlers will be terminated asynchronously.
Close(err error)
- // RemoteAddr returns the remote network address.
- RemoteAddr() net.Addr
+ // Peer returns the peer of the server transport.
+ Peer() *peer.Peer
// Drain notifies the client this ServerTransport stops accepting new RPCs.
Drain(debugData string)
@@ -779,7 +909,7 @@ var (
// connection is draining. This could be caused by goaway or balancer
// removing the address.
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
- // errStreamDone is returned from write at the client side to indiacte application
+ // errStreamDone is returned from write at the client side to indicate application
// layer of an error.
errStreamDone = errors.New("the stream is done")
// StatusGoAway indicates that the server sent a GOAWAY that included this
@@ -801,30 +931,6 @@ const (
GoAwayTooManyPings GoAwayReason = 2
)
-// channelzData is used to store channelz related data for http2Client and http2Server.
-// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic
-// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
-// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
-type channelzData struct {
- kpCount int64
- // The number of streams that have started, including already finished ones.
- streamsStarted int64
- // Client side: The number of streams that have ended successfully by receiving
- // EoS bit set frame from server.
- // Server side: The number of streams that have ended successfully by sending
- // frame with EoS bit set.
- streamsSucceeded int64
- streamsFailed int64
- // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type
- // instead of time.Time since it's more costly to atomically update time.Time variable than int64
- // variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
- lastStreamCreatedTime int64
- msgSent int64
- msgRecv int64
- lastMsgSentTime int64
- lastMsgRecvTime int64
-}
-
// ContextErr converts the error from context package into a status error.
func ContextErr(err error) error {
switch err {
diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
deleted file mode 100644
index e8b492774..000000000
--- a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright 2021 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package internal
-
-import (
- "google.golang.org/grpc/attributes"
- "google.golang.org/grpc/resolver"
-)
-
-// handshakeClusterNameKey is the type used as the key to store cluster name in
-// the Attributes field of resolver.Address.
-type handshakeClusterNameKey struct{}
-
-// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field
-// is updated with the cluster name.
-func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address {
- addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName)
- return addr
-}
-
-// GetXDSHandshakeClusterName returns cluster name stored in attr.
-func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) {
- v := attr.Value(handshakeClusterNameKey{})
- name, ok := v.(string)
- return name, ok
-}
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
index 34d31b5e7..eb42b19fb 100644
--- a/vendor/google.golang.org/grpc/keepalive/keepalive.go
+++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -34,15 +34,29 @@ type ClientParameters struct {
// After a duration of this time if the client doesn't see any activity it
// pings the server to see if the transport is still alive.
// If set below 10s, a minimum value of 10s will be used instead.
- Time time.Duration // The current default value is infinity.
+ //
+ // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5
+ // minutes (which means the client shouldn't ping more frequently than every
+ // 5 minutes).
+ //
+ // Though not ideal, it's not a strong requirement for Time to be less than
+ // EnforcementPolicy.MinTime. Time will automatically double if the server
+ // disconnects due to its enforcement policy.
+ //
+ // For more details, see
+ // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md
+ Time time.Duration
// After having pinged for keepalive check, the client waits for a duration
// of Timeout and if no activity is seen even after that the connection is
// closed.
- Timeout time.Duration // The current default value is 20 seconds.
+ //
+ // If keepalive is enabled, and this value is not explicitly set, the default
+ // is 20 seconds.
+ Timeout time.Duration
// If true, client sends keepalive pings even with no active RPCs. If false,
// when there are no active RPCs, Time and Timeout will be ignored and no
// keepalive pings will be sent.
- PermitWithoutStream bool // false by default.
+ PermitWithoutStream bool
}
// ServerParameters is used to set keepalive and max-age parameters on the
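
Illustrative sketch (not part of the vendored diff) of the client-side knobs documented above, wired into a connection via grpc.WithKeepaliveParams; the target address and insecure credentials are placeholder assumptions.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			// Stay at or above the server's default EnforcementPolicy.MinTime
			// (5 minutes) unless the server is known to permit more frequent pings.
			Time:                5 * time.Minute,
			Timeout:             20 * time.Second, // close if no ack within 20s
			PermitWithoutStream: true,             // ping even with no active RPCs
		}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
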
diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go
new file mode 100644
index 000000000..c37c58c02
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go
@@ -0,0 +1,194 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+ "sort"
+ "sync"
+
+ "google.golang.org/grpc/internal"
+)
+
+// BufferPool is a pool of buffers that can be shared and reused, resulting in
+// decreased memory allocation.
+type BufferPool interface {
+ // Get returns a buffer with specified length from the pool.
+ Get(length int) *[]byte
+
+ // Put returns a buffer to the pool.
+ Put(*[]byte)
+}
+
+var defaultBufferPoolSizes = []int{
+ 256,
+ 4 << 10, // 4KB (go page size)
+ 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
+ 32 << 10, // 32KB (default buffer size for io.Copy)
+ 1 << 20, // 1MB
+}
+
+var defaultBufferPool BufferPool
+
+func init() {
+ defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...)
+
+ internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) {
+ defaultBufferPool = pool
+ }
+
+ internal.SetBufferPoolingThresholdForTesting = func(threshold int) {
+ bufferPoolingThreshold = threshold
+ }
+}
+
+// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
+// created with NewTieredBufferPool that uses a set of default sizes optimized for
+// expected workflows.
+func DefaultBufferPool() BufferPool {
+ return defaultBufferPool
+}
+
+// NewTieredBufferPool returns a BufferPool implementation that uses multiple
+// underlying pools of the given pool sizes.
+func NewTieredBufferPool(poolSizes ...int) BufferPool {
+ sort.Ints(poolSizes)
+ pools := make([]*sizedBufferPool, len(poolSizes))
+ for i, s := range poolSizes {
+ pools[i] = newSizedBufferPool(s)
+ }
+ return &tieredBufferPool{
+ sizedPools: pools,
+ }
+}
+
+// tieredBufferPool implements the BufferPool interface with multiple tiers of
+// buffer pools for different sizes of buffers.
+type tieredBufferPool struct {
+ sizedPools []*sizedBufferPool
+ fallbackPool simpleBufferPool
+}
+
+func (p *tieredBufferPool) Get(size int) *[]byte {
+ return p.getPool(size).Get(size)
+}
+
+func (p *tieredBufferPool) Put(buf *[]byte) {
+ p.getPool(cap(*buf)).Put(buf)
+}
+
+func (p *tieredBufferPool) getPool(size int) BufferPool {
+ poolIdx := sort.Search(len(p.sizedPools), func(i int) bool {
+ return p.sizedPools[i].defaultSize >= size
+ })
+
+ if poolIdx == len(p.sizedPools) {
+ return &p.fallbackPool
+ }
+
+ return p.sizedPools[poolIdx]
+}
+
+// sizedBufferPool is a BufferPool implementation that is optimized for specific
+// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size
+// of 16kb and a sizedBufferPool can be configured to only return buffers with a
+// capacity of 16kb. Note that however it does not support returning larger
+// buffers and in fact panics if such a buffer is requested. Because of this,
+// this BufferPool implementation is not meant to be used on its own and rather
+// is intended to be embedded in a tieredBufferPool such that Get is only
+// invoked when the required size is smaller than or equal to defaultSize.
+type sizedBufferPool struct {
+ pool sync.Pool
+ defaultSize int
+}
+
+func (p *sizedBufferPool) Get(size int) *[]byte {
+ buf := p.pool.Get().(*[]byte)
+ b := *buf
+ clear(b[:cap(b)])
+ *buf = b[:size]
+ return buf
+}
+
+func (p *sizedBufferPool) Put(buf *[]byte) {
+ if cap(*buf) < p.defaultSize {
+ // Ignore buffers that are too small to fit in the pool. Otherwise, when
+ // Get is called it will panic as it tries to index outside the bounds
+ // of the buffer.
+ return
+ }
+ p.pool.Put(buf)
+}
+
+func newSizedBufferPool(size int) *sizedBufferPool {
+ return &sizedBufferPool{
+ pool: sync.Pool{
+ New: func() any {
+ buf := make([]byte, size)
+ return &buf
+ },
+ },
+ defaultSize: size,
+ }
+}
+
+var _ BufferPool = (*simpleBufferPool)(nil)
+
+// simpleBufferPool is an implementation of the BufferPool interface that
+// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to
+// acquire a buffer from the pool but if that buffer is too small, it returns it
+// to the pool and creates a new one.
+type simpleBufferPool struct {
+ pool sync.Pool
+}
+
+func (p *simpleBufferPool) Get(size int) *[]byte {
+ bs, ok := p.pool.Get().(*[]byte)
+ if ok && cap(*bs) >= size {
+ *bs = (*bs)[:size]
+ return bs
+ }
+
+ // A buffer was pulled from the pool, but it is too small. Put it back in
+ // the pool and create one large enough.
+ if ok {
+ p.pool.Put(bs)
+ }
+
+ b := make([]byte, size)
+ return &b
+}
+
+func (p *simpleBufferPool) Put(buf *[]byte) {
+ p.pool.Put(buf)
+}
+
+var _ BufferPool = NopBufferPool{}
+
+// NopBufferPool is a buffer pool that returns new buffers without pooling.
+type NopBufferPool struct{}
+
+// Get returns a buffer with specified length from the pool.
+func (NopBufferPool) Get(length int) *[]byte {
+ b := make([]byte, length)
+ return &b
+}
+
+// Put returns a buffer to the pool.
+func (NopBufferPool) Put(*[]byte) {
+}
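
Illustrative sketch (not part of the vendored diff) of the BufferPool contract defined above; the tier sizes here are arbitrary examples.

var pool mem.BufferPool = mem.NewTieredBufferPool(256, 4<<10, 16<<10)

buf := pool.Get(1024)         // *[]byte with len(*buf) == 1024
copy(*buf, []byte("payload")) // use the buffer
pool.Put(buf)                 // return it for reuse by later Get calls

// Code that doesn't need custom tiers can share the process-wide default.
b := mem.DefaultBufferPool().Get(4096)
mem.DefaultBufferPool().Put(b)
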
diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go
new file mode 100644
index 000000000..228e9c2f2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go
@@ -0,0 +1,226 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+ "io"
+)
+
+// BufferSlice offers a means to represent data that spans one or more Buffer
+// instances. A BufferSlice is meant to be immutable after creation, and methods
+// like Ref create and return copies of the slice. This is why all methods have
+// value receivers rather than pointer receivers.
+//
+// Note that any of the methods that read the underlying buffers such as Ref,
+// Len or CopyTo etc., will panic if any underlying buffers have already been
+// freed. It is recommended to not directly interact with any of the underlying
+// buffers directly, rather such interactions should be mediated through the
+// various methods on this type.
+//
+// By convention, any APIs that return (mem.BufferSlice, error) should reduce
+// the burden on the caller by never returning a mem.BufferSlice that needs to
+// be freed if the error is non-nil, unless explicitly stated.
+type BufferSlice []Buffer
+
+// Len returns the sum of the length of all the Buffers in this slice.
+//
+// # Warning
+//
+// Invoking the built-in len on a BufferSlice will return the number of buffers
+// in the slice, and *not* the value returned by this function.
+func (s BufferSlice) Len() int {
+ var length int
+ for _, b := range s {
+ length += b.Len()
+ }
+ return length
+}
+
+// Ref invokes Ref on each buffer in the slice.
+func (s BufferSlice) Ref() {
+ for _, b := range s {
+ b.Ref()
+ }
+}
+
+// Free invokes Buffer.Free() on each Buffer in the slice.
+func (s BufferSlice) Free() {
+ for _, b := range s {
+ b.Free()
+ }
+}
+
+// CopyTo copies each of the underlying Buffer's data into the given buffer,
+// returning the number of bytes copied. Has the same semantics as the copy
+// builtin in that it will copy as many bytes as it can, stopping when either dst
+// is full or s runs out of data, returning the minimum of s.Len() and len(dst).
+func (s BufferSlice) CopyTo(dst []byte) int {
+ off := 0
+ for _, b := range s {
+ off += copy(dst[off:], b.ReadOnlyData())
+ }
+ return off
+}
+
+// Materialize concatenates all the underlying Buffer's data into a single
+// contiguous buffer using CopyTo.
+func (s BufferSlice) Materialize() []byte {
+ l := s.Len()
+ if l == 0 {
+ return nil
+ }
+ out := make([]byte, l)
+ s.CopyTo(out)
+ return out
+}
+
+// MaterializeToBuffer functions like Materialize except that it writes the data
+// to a single Buffer pulled from the given BufferPool.
+//
+// As a special case, if the input BufferSlice only actually has one Buffer, this
+// function simply increases the refcount before returning said Buffer. Freeing this
+// buffer won't release it until the BufferSlice is itself released.
+func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
+ if len(s) == 1 {
+ s[0].Ref()
+ return s[0]
+ }
+ sLen := s.Len()
+ if sLen == 0 {
+ return emptyBuffer{}
+ }
+ buf := pool.Get(sLen)
+ s.CopyTo(*buf)
+ return NewBuffer(buf, pool)
+}
+
+// Reader returns a new Reader for the input slice after taking references to
+// each underlying buffer.
+func (s BufferSlice) Reader() Reader {
+ s.Ref()
+ return &sliceReader{
+ data: s,
+ len: s.Len(),
+ }
+}
+
+// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
+// with other parts systems. It also provides an additional convenience method
+// Remaining(), which returns the number of unread bytes remaining in the slice.
+// Buffers will be freed as they are read.
+type Reader interface {
+ io.Reader
+ io.ByteReader
+ // Close frees the underlying BufferSlice and never returns an error. Subsequent
+ // calls to Read will return (0, io.EOF).
+ Close() error
+ // Remaining returns the number of unread bytes remaining in the slice.
+ Remaining() int
+}
+
+type sliceReader struct {
+ data BufferSlice
+ len int
+ // The index into data[0].ReadOnlyData().
+ bufferIdx int
+}
+
+func (r *sliceReader) Remaining() int {
+ return r.len
+}
+
+func (r *sliceReader) Close() error {
+ r.data.Free()
+ r.data = nil
+ r.len = 0
+ return nil
+}
+
+func (r *sliceReader) freeFirstBufferIfEmpty() bool {
+ if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
+ return false
+ }
+
+ r.data[0].Free()
+ r.data = r.data[1:]
+ r.bufferIdx = 0
+ return true
+}
+
+func (r *sliceReader) Read(buf []byte) (n int, _ error) {
+ if r.len == 0 {
+ return 0, io.EOF
+ }
+
+ for len(buf) != 0 && r.len != 0 {
+ // Copy as much as possible from the first Buffer in the slice into the
+ // given byte slice.
+ data := r.data[0].ReadOnlyData()
+ copied := copy(buf, data[r.bufferIdx:])
+ r.len -= copied // Reduce len by the number of bytes copied.
+ r.bufferIdx += copied // Increment the buffer index.
+ n += copied // Increment the total number of bytes read.
+ buf = buf[copied:] // Shrink the given byte slice.
+
+ // If we have copied all the data from the first Buffer, free it and advance to
+ // the next in the slice.
+ r.freeFirstBufferIfEmpty()
+ }
+
+ return n, nil
+}
+
+func (r *sliceReader) ReadByte() (byte, error) {
+ if r.len == 0 {
+ return 0, io.EOF
+ }
+
+ // There may be any number of empty buffers in the slice, clear them all until a
+ // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0.
+ for r.freeFirstBufferIfEmpty() {
+ }
+
+ b := r.data[0].ReadOnlyData()[r.bufferIdx]
+ r.len--
+ r.bufferIdx++
+ // Free the first buffer in the slice if the last byte was read
+ r.freeFirstBufferIfEmpty()
+ return b, nil
+}
+
+var _ io.Writer = (*writer)(nil)
+
+type writer struct {
+ buffers *BufferSlice
+ pool BufferPool
+}
+
+func (w *writer) Write(p []byte) (n int, err error) {
+ b := Copy(p, w.pool)
+ *w.buffers = append(*w.buffers, b)
+ return b.Len(), nil
+}
+
+// NewWriter wraps the given BufferSlice and BufferPool to implement the
+// io.Writer interface. Every call to Write copies the contents of the given
+// buffer into a new Buffer pulled from the given pool and the Buffer is added to
+// the given BufferSlice.
+func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
+ return &writer{buffers: buffers, pool: pool}
+}
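
Illustrative sketch (not part of the vendored diff): accumulating data into a BufferSlice through the io.Writer returned by NewWriter, then consuming it either as one contiguous []byte or through the Reader; fmt is assumed to be imported.

var bs mem.BufferSlice
w := mem.NewWriter(&bs, mem.DefaultBufferPool())
fmt.Fprint(w, "hello ")
fmt.Fprint(w, "world")

flat := bs.Materialize() // copies every buffer into a single []byte
_ = flat

r := bs.Reader()      // takes its own references to the buffers
chunk := make([]byte, 5)
n, _ := r.Read(chunk) // buffers are freed as they are fully read
_ = n
_ = r.Close()         // frees the reader's remaining references

bs.Free() // release the slice's own references once done
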
diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go
new file mode 100644
index 000000000..ecbf0b9a7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffers.go
@@ -0,0 +1,268 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package mem provides utilities that facilitate memory reuse in byte slices
+// that are used as buffers.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
+// removed in a later release.
+package mem
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+)
+
+// A Buffer represents a reference counted piece of data (in bytes) that can be
+// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be
+// released by calling Free(), which invokes the free function given at creation
+// only after all references are released.
+//
+// Note that a Buffer is not safe for concurrent access and instead each
+// goroutine should use its own reference to the data, which can be acquired via
+// a call to Ref().
+//
+// Attempts to access the underlying data after releasing the reference to the
+// Buffer will panic.
+type Buffer interface {
+ // ReadOnlyData returns the underlying byte slice. Note that it is undefined
+ // behavior to modify the contents of this slice in any way.
+ ReadOnlyData() []byte
+ // Ref increases the reference counter for this Buffer.
+ Ref()
+ // Free decrements this Buffer's reference counter and frees the underlying
+ // byte slice if the counter reaches 0 as a result of this call.
+ Free()
+ // Len returns the Buffer's size.
+ Len() int
+
+ split(n int) (left, right Buffer)
+ read(buf []byte) (int, Buffer)
+}
+
+var (
+ bufferPoolingThreshold = 1 << 10
+
+ bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }}
+ refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }}
+)
+
+// IsBelowBufferPoolingThreshold returns true if the given size is less than or
+// equal to the threshold for buffer pooling. This is used to determine whether
+// to pool buffers or allocate them directly.
+func IsBelowBufferPoolingThreshold(size int) bool {
+ return size <= bufferPoolingThreshold
+}
+
+type buffer struct {
+ origData *[]byte
+ data []byte
+ refs *atomic.Int32
+ pool BufferPool
+}
+
+func newBuffer() *buffer {
+ return bufferObjectPool.Get().(*buffer)
+}
+
+// NewBuffer creates a new Buffer from the given data, initializing the reference
+// counter to 1. The data will then be returned to the given pool when all
+// references to the returned Buffer are released. As a special case to avoid
+// additional allocations, if the given buffer pool is nil, the returned buffer
+// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the
+// underlying data is never freed.
+//
+// Note that the backing array of the given data is not copied.
+func NewBuffer(data *[]byte, pool BufferPool) Buffer {
+ // Use the buffer's capacity instead of the length, otherwise buffers may
+ // not be reused under certain conditions. For example, if a large buffer
+ // is acquired from the pool, but fewer bytes than the buffering threshold
+ // are written to it, the buffer will not be returned to the pool.
+ if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) {
+ return (SliceBuffer)(*data)
+ }
+ b := newBuffer()
+ b.origData = data
+ b.data = *data
+ b.pool = pool
+ b.refs = refObjectPool.Get().(*atomic.Int32)
+ b.refs.Add(1)
+ return b
+}
+
+// Copy creates a new Buffer from the given data, initializing the reference
+// counter to 1.
+//
+// It acquires a []byte from the given pool and copies over the backing array
+// of the given data. The []byte acquired from the pool is returned to the
+// pool when all references to the returned Buffer are released.
+func Copy(data []byte, pool BufferPool) Buffer {
+ if IsBelowBufferPoolingThreshold(len(data)) {
+ buf := make(SliceBuffer, len(data))
+ copy(buf, data)
+ return buf
+ }
+
+ buf := pool.Get(len(data))
+ copy(*buf, data)
+ return NewBuffer(buf, pool)
+}
+
+func (b *buffer) ReadOnlyData() []byte {
+ if b.refs == nil {
+ panic("Cannot read freed buffer")
+ }
+ return b.data
+}
+
+func (b *buffer) Ref() {
+ if b.refs == nil {
+ panic("Cannot ref freed buffer")
+ }
+ b.refs.Add(1)
+}
+
+func (b *buffer) Free() {
+ if b.refs == nil {
+ panic("Cannot free freed buffer")
+ }
+
+ refs := b.refs.Add(-1)
+ switch {
+ case refs > 0:
+ return
+ case refs == 0:
+ if b.pool != nil {
+ b.pool.Put(b.origData)
+ }
+
+ refObjectPool.Put(b.refs)
+ b.origData = nil
+ b.data = nil
+ b.refs = nil
+ b.pool = nil
+ bufferObjectPool.Put(b)
+ default:
+ panic("Cannot free freed buffer")
+ }
+}
+
+func (b *buffer) Len() int {
+ return len(b.ReadOnlyData())
+}
+
+func (b *buffer) split(n int) (Buffer, Buffer) {
+ if b.refs == nil {
+ panic("Cannot split freed buffer")
+ }
+
+ b.refs.Add(1)
+ split := newBuffer()
+ split.origData = b.origData
+ split.data = b.data[n:]
+ split.refs = b.refs
+ split.pool = b.pool
+
+ b.data = b.data[:n]
+
+ return b, split
+}
+
+func (b *buffer) read(buf []byte) (int, Buffer) {
+ if b.refs == nil {
+ panic("Cannot read freed buffer")
+ }
+
+ n := copy(buf, b.data)
+ if n == len(b.data) {
+ b.Free()
+ return n, nil
+ }
+
+ b.data = b.data[n:]
+ return n, b
+}
+
+func (b *buffer) String() string {
+ return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData()))
+}
+
+// ReadUnsafe reads bytes from the given Buffer into the provided slice.
+// It does not perform safety checks.
+func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) {
+ return buf.read(dst)
+}
+
+// SplitUnsafe modifies the given buffer to point to the first n bytes while it
+// returns a new reference to the remaining bytes. The returned Buffer
+// functions just like a normal reference acquired using Ref().
+func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
+ return buf.split(n)
+}
+
+type emptyBuffer struct{}
+
+func (e emptyBuffer) ReadOnlyData() []byte {
+ return nil
+}
+
+func (e emptyBuffer) Ref() {}
+func (e emptyBuffer) Free() {}
+
+func (e emptyBuffer) Len() int {
+ return 0
+}
+
+func (e emptyBuffer) split(int) (left, right Buffer) {
+ return e, e
+}
+
+func (e emptyBuffer) read([]byte) (int, Buffer) {
+ return 0, e
+}
+
+// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides
+// methods for reading, splitting, and managing the byte slice.
+type SliceBuffer []byte
+
+// ReadOnlyData returns the byte slice.
+func (s SliceBuffer) ReadOnlyData() []byte { return s }
+
+// Ref is a noop implementation of Ref.
+func (s SliceBuffer) Ref() {}
+
+// Free is a noop implementation of Free.
+func (s SliceBuffer) Free() {}
+
+// Len is a noop implementation of Len.
+func (s SliceBuffer) Len() int { return len(s) }
+
+func (s SliceBuffer) split(n int) (left, right Buffer) {
+ return s[:n], s[n:]
+}
+
+func (s SliceBuffer) read(buf []byte) (int, Buffer) {
+ n := copy(buf, s)
+ if n == len(s) {
+ return n, nil
+ }
+ return n, s[n:]
+}
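
Illustrative sketch (not part of the vendored diff) of the reference-counting contract described above. The payload size is chosen to exceed the pooling threshold so Copy actually acquires pooled backing storage.

data := make([]byte, 64<<10)
buf := mem.Copy(data, mem.DefaultBufferPool())

view := buf.ReadOnlyData() // read-only view; must not be modified
_ = view

buf.Ref()  // hand a second reference to another consumer
buf.Free() // drop the first reference; the data stays alive
buf.Free() // last reference gone; the backing []byte returns to the pool
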
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index a2cdcaf12..d2e15253b 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -25,8 +25,14 @@ import (
"context"
"fmt"
"strings"
+
+ "google.golang.org/grpc/internal"
)
+func init() {
+ internal.FromOutgoingContextRaw = fromOutgoingContextRaw
+}
+
// DecodeKeyValue returns k, v, nil.
//
// Deprecated: use k and v directly instead.
@@ -153,14 +159,16 @@ func Join(mds ...MD) MD {
type mdIncomingKey struct{}
type mdOutgoingKey struct{}
-// NewIncomingContext creates a new context with incoming md attached.
+// NewIncomingContext creates a new context with incoming md attached. md must
+// not be modified after calling this function.
func NewIncomingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdIncomingKey{}, md)
}
// NewOutgoingContext creates a new context with outgoing md attached. If used
// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
-// overwrite any previously-appended metadata.
+// overwrite any previously-appended metadata. md must not be modified after
+// calling this function.
func NewOutgoingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
}
@@ -203,12 +211,8 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
}
// ValueFromIncomingContext returns the metadata value corresponding to the metadata
-// key from the incoming metadata if it exists. Key must be lower-case.
-//
-// # Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
+// key from the incoming metadata if it exists. Keys are matched in a case insensitive
+// manner.
func ValueFromIncomingContext(ctx context.Context, key string) []string {
md, ok := ctx.Value(mdIncomingKey{}).(MD)
if !ok {
@@ -219,33 +223,29 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string {
return copyOf(v)
}
for k, v := range md {
- // We need to manually convert all keys to lower case, because MD is a
- // map, and there's no guarantee that the MD attached to the context is
- // created using our helper functions.
- if strings.ToLower(k) == key {
+ // Case insensitive comparison: MD is a map, and there's no guarantee
+ // that the MD attached to the context is created using our helper
+ // functions.
+ if strings.EqualFold(k, key) {
return copyOf(v)
}
}
return nil
}
-// the returned slice must not be modified in place
func copyOf(v []string) []string {
vals := make([]string, len(v))
copy(vals, v)
return vals
}
-// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
+// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
//
// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
// is a map, there's no guarantee it's created using our helper functions) and
// the extra kv pairs (AppendToOutgoingContext doesn't turn them into
// lowercase).
-//
-// This is intended for gRPC-internal use ONLY. Users should use
-// FromOutgoingContext instead.
-func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
+func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
if !ok {
return nil, nil, false
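
Illustrative sketch (not part of the vendored diff): server-side code reading a single metadata key with ValueFromIncomingContext, which now matches keys case-insensitively; the "authorization" key and the authToken helper are example names.

func authToken(ctx context.Context) (string, bool) {
	vals := metadata.ValueFromIncomingContext(ctx, "authorization")
	if len(vals) == 0 {
		return "", false
	}
	return vals[0], true
}
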
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
index e01d219ff..499a49c8c 100644
--- a/vendor/google.golang.org/grpc/peer/peer.go
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -22,7 +22,9 @@ package peer
import (
"context"
+ "fmt"
"net"
+ "strings"
"google.golang.org/grpc/credentials"
)
@@ -32,11 +34,41 @@ import (
type Peer struct {
// Addr is the peer address.
Addr net.Addr
+ // LocalAddr is the local address.
+ LocalAddr net.Addr
// AuthInfo is the authentication information of the transport.
// It is nil if there is no transport security being used.
AuthInfo credentials.AuthInfo
}
+// String ensures the Peer type implements the Stringer interface so that a
+// context carrying a peerKey value can be printed effectively.
+func (p *Peer) String() string {
+ if p == nil {
+ return "Peer"
+ }
+ sb := &strings.Builder{}
+ sb.WriteString("Peer{")
+ if p.Addr != nil {
+ fmt.Fprintf(sb, "Addr: '%s', ", p.Addr.String())
+ } else {
+ fmt.Fprintf(sb, "Addr: , ")
+ }
+ if p.LocalAddr != nil {
+ fmt.Fprintf(sb, "LocalAddr: '%s', ", p.LocalAddr.String())
+ } else {
+ fmt.Fprintf(sb, "LocalAddr: , ")
+ }
+ if p.AuthInfo != nil {
+ fmt.Fprintf(sb, "AuthInfo: '%s'", p.AuthInfo.AuthType())
+ } else {
+ fmt.Fprintf(sb, "AuthInfo: